From e0ea692c5a813622f22a095a371b48e34e787cbe Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Thu, 2 Dec 2021 17:59:24 +0000 Subject: [PATCH] =?UTF-8?q?=F0=9F=A6=89=20Updates=20from=20OwlBot?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --- google/cloud/aiplatform_v1/__init__.py | 12 + .../cloud/aiplatform_v1/gapic_metadata.json | 10 + .../services/endpoint_service/async_client.py | 20 +- .../services/endpoint_service/client.py | 20 +- .../featurestore_service/async_client.py | 64 +- .../services/featurestore_service/client.py | 64 +- .../index_endpoint_service/async_client.py | 99 + .../services/index_endpoint_service/client.py | 99 + .../index_endpoint_service/transports/base.py | 14 + .../index_endpoint_service/transports/grpc.py | 29 + .../transports/grpc_asyncio.py | 30 + .../services/job_service/async_client.py | 3 +- .../services/job_service/client.py | 3 +- .../services/migration_service/client.py | 22 +- .../services/model_service/async_client.py | 10 +- .../services/model_service/client.py | 10 +- .../services/model_service/transports/grpc.py | 10 +- .../model_service/transports/grpc_asyncio.py | 10 +- .../services/pipeline_service/async_client.py | 2 +- .../services/pipeline_service/client.py | 2 +- .../pipeline_service/transports/grpc.py | 2 +- .../transports/grpc_asyncio.py | 2 +- .../services/vizier_service/async_client.py | 12 +- .../services/vizier_service/client.py | 12 +- .../vizier_service/transports/grpc.py | 6 +- .../vizier_service/transports/grpc_asyncio.py | 6 +- google/cloud/aiplatform_v1/types/__init__.py | 12 + google/cloud/aiplatform_v1/types/artifact.py | 6 +- .../types/batch_prediction_job.py | 23 +- .../cloud/aiplatform_v1/types/custom_job.py | 9 +- google/cloud/aiplatform_v1/types/endpoint.py | 30 +- .../aiplatform_v1/types/endpoint_service.py | 11 + google/cloud/aiplatform_v1/types/execution.py | 2 +- 
.../cloud/aiplatform_v1/types/explanation.py | 41 + .../cloud/aiplatform_v1/types/featurestore.py | 2 +- .../types/featurestore_online_service.py | 5 +- .../types/featurestore_service.py | 60 +- .../aiplatform_v1/types/index_endpoint.py | 31 +- .../types/index_endpoint_service.py | 57 + .../cloud/aiplatform_v1/types/job_service.py | 2 +- .../types/model_deployment_monitoring_job.py | 7 +- .../aiplatform_v1/types/model_monitoring.py | 23 +- .../cloud/aiplatform_v1/types/pipeline_job.py | 19 +- .../aiplatform_v1/types/pipeline_service.py | 1 + google/cloud/aiplatform_v1/types/study.py | 32 +- .../aiplatform_v1/types/training_pipeline.py | 51 + .../types/unmanaged_container_model.py | 18 +- google/cloud/aiplatform_v1beta1/__init__.py | 12 + .../aiplatform_v1beta1/gapic_metadata.json | 10 + .../services/endpoint_service/async_client.py | 20 +- .../services/endpoint_service/client.py | 20 +- .../featurestore_service/async_client.py | 82 +- .../services/featurestore_service/client.py | 82 +- .../index_endpoint_service/async_client.py | 99 + .../services/index_endpoint_service/client.py | 99 + .../index_endpoint_service/transports/base.py | 14 + .../index_endpoint_service/transports/grpc.py | 29 + .../transports/grpc_asyncio.py | 30 + .../services/job_service/async_client.py | 3 +- .../services/job_service/client.py | 3 +- .../services/model_service/async_client.py | 11 +- .../services/model_service/client.py | 11 +- .../services/model_service/transports/grpc.py | 11 +- .../model_service/transports/grpc_asyncio.py | 11 +- .../services/pipeline_service/async_client.py | 2 +- .../services/pipeline_service/client.py | 2 +- .../pipeline_service/transports/grpc.py | 2 +- .../transports/grpc_asyncio.py | 2 +- .../prediction_service/async_client.py | 13 +- .../services/prediction_service/client.py | 13 +- .../prediction_service/transports/grpc.py | 13 +- .../transports/grpc_asyncio.py | 13 +- .../services/vizier_service/async_client.py | 12 +- 
.../services/vizier_service/client.py | 12 +- .../vizier_service/transports/grpc.py | 6 +- .../vizier_service/transports/grpc_asyncio.py | 6 +- .../aiplatform_v1beta1/types/__init__.py | 12 + .../aiplatform_v1beta1/types/artifact.py | 6 +- .../types/batch_prediction_job.py | 23 +- .../aiplatform_v1beta1/types/custom_job.py | 9 +- .../aiplatform_v1beta1/types/endpoint.py | 40 +- .../types/endpoint_service.py | 11 + .../aiplatform_v1beta1/types/execution.py | 2 +- .../aiplatform_v1beta1/types/explanation.py | 41 + .../cloud/aiplatform_v1beta1/types/feature.py | 2 +- .../aiplatform_v1beta1/types/featurestore.py | 5 +- .../types/featurestore_online_service.py | 5 +- .../types/featurestore_service.py | 60 +- .../types/index_endpoint.py | 36 +- .../types/index_endpoint_service.py | 57 + .../aiplatform_v1beta1/types/job_service.py | 2 +- .../types/metadata_schema.py | 3 +- .../types/model_deployment_monitoring_job.py | 7 +- .../types/model_monitoring.py | 23 +- .../aiplatform_v1beta1/types/pipeline_job.py | 19 +- .../types/pipeline_service.py | 2 + .../types/prediction_service.py | 10 +- .../cloud/aiplatform_v1beta1/types/study.py | 32 +- .../types/training_pipeline.py | 51 + .../types/unmanaged_container_model.py | 18 +- owl-bot-staging/v1/.coveragerc | 17 - owl-bot-staging/v1/MANIFEST.in | 2 - owl-bot-staging/v1/README.rst | 49 - .../v1/docs/aiplatform_v1/dataset_service.rst | 10 - .../docs/aiplatform_v1/endpoint_service.rst | 10 - .../featurestore_online_serving_service.rst | 6 - .../aiplatform_v1/featurestore_service.rst | 10 - .../aiplatform_v1/index_endpoint_service.rst | 10 - .../v1/docs/aiplatform_v1/index_service.rst | 10 - .../v1/docs/aiplatform_v1/job_service.rst | 10 - .../docs/aiplatform_v1/metadata_service.rst | 10 - .../docs/aiplatform_v1/migration_service.rst | 10 - .../v1/docs/aiplatform_v1/model_service.rst | 10 - .../docs/aiplatform_v1/pipeline_service.rst | 10 - .../docs/aiplatform_v1/prediction_service.rst | 6 - .../v1/docs/aiplatform_v1/services.rst 
| 20 - .../aiplatform_v1/specialist_pool_service.rst | 10 - .../aiplatform_v1/tensorboard_service.rst | 10 - .../v1/docs/aiplatform_v1/types.rst | 7 - .../v1/docs/aiplatform_v1/vizier_service.rst | 10 - owl-bot-staging/v1/docs/conf.py | 376 - .../v1/docs/definition_v1/services.rst | 4 - .../v1/docs/definition_v1/types.rst | 7 - owl-bot-staging/v1/docs/index.rst | 7 - .../v1/docs/instance_v1/services.rst | 4 - owl-bot-staging/v1/docs/instance_v1/types.rst | 7 - .../v1/docs/params_v1/services.rst | 4 - owl-bot-staging/v1/docs/params_v1/types.rst | 7 - .../v1/docs/prediction_v1/services.rst | 4 - .../v1/docs/prediction_v1/types.rst | 7 - .../v1/google/cloud/aiplatform/__init__.py | 927 -- .../v1/google/cloud/aiplatform/py.typed | 2 - .../v1/schema/predict/instance/__init__.py | 37 - .../v1/schema/predict/instance/py.typed | 2 - .../v1/schema/predict/instance_v1/__init__.py | 38 - .../predict/instance_v1/gapic_metadata.json | 7 - .../v1/schema/predict/instance_v1/py.typed | 2 - .../predict/instance_v1/services/__init__.py | 15 - .../predict/instance_v1/types/__init__.py | 54 - .../instance_v1/types/image_classification.py | 56 - .../types/image_object_detection.py | 56 - .../instance_v1/types/image_segmentation.py | 50 - .../instance_v1/types/text_classification.py | 49 - .../instance_v1/types/text_extraction.py | 62 - .../instance_v1/types/text_sentiment.py | 49 - .../types/video_action_recognition.py | 73 - .../instance_v1/types/video_classification.py | 73 - .../types/video_object_tracking.py | 73 - .../v1/schema/predict/params/__init__.py | 31 - .../v1/schema/predict/params/py.typed | 2 - .../v1/schema/predict/params_v1/__init__.py | 32 - .../predict/params_v1/gapic_metadata.json | 7 - .../v1/schema/predict/params_v1/py.typed | 2 - .../predict/params_v1/services/__init__.py | 15 - .../predict/params_v1/types/__init__.py | 42 - .../params_v1/types/image_classification.py | 52 - .../params_v1/types/image_object_detection.py | 53 - 
.../params_v1/types/image_segmentation.py | 45 - .../types/video_action_recognition.py | 53 - .../params_v1/types/video_classification.py | 95 - .../params_v1/types/video_object_tracking.py | 61 - .../v1/schema/predict/prediction/__init__.py | 39 - .../v1/schema/predict/prediction/py.typed | 2 - .../schema/predict/prediction_v1/__init__.py | 40 - .../predict/prediction_v1/gapic_metadata.json | 7 - .../v1/schema/predict/prediction_v1/py.typed | 2 - .../prediction_v1/services/__init__.py | 15 - .../predict/prediction_v1/types/__init__.py | 58 - .../prediction_v1/types/classification.py | 57 - .../types/image_object_detection.py | 73 - .../prediction_v1/types/image_segmentation.py | 62 - .../types/tabular_classification.py | 52 - .../prediction_v1/types/tabular_regression.py | 53 - .../prediction_v1/types/text_extraction.py | 78 - .../prediction_v1/types/text_sentiment.py | 48 - .../types/video_action_recognition.py | 85 - .../types/video_classification.py | 103 - .../types/video_object_tracking.py | 145 - .../schema/trainingjob/definition/__init__.py | 69 - .../v1/schema/trainingjob/definition/py.typed | 2 - .../trainingjob/definition_v1/__init__.py | 70 - .../definition_v1/gapic_metadata.json | 7 - .../schema/trainingjob/definition_v1/py.typed | 2 - .../definition_v1/services/__init__.py | 15 - .../definition_v1/types/__init__.py | 90 - .../types/automl_image_classification.py | 158 - .../types/automl_image_object_detection.py | 139 - .../types/automl_image_segmentation.py | 133 - .../definition_v1/types/automl_tables.py | 529 - .../types/automl_text_classification.py | 58 - .../types/automl_text_extraction.py | 49 - .../types/automl_text_sentiment.py | 67 - .../types/automl_video_action_recognition.py | 66 - .../types/automl_video_classification.py | 65 - .../types/automl_video_object_tracking.py | 68 - .../export_evaluated_data_items_config.py | 57 - .../v1/google/cloud/aiplatform_v1/__init__.py | 928 -- .../cloud/aiplatform_v1/gapic_metadata.json | 2059 ---- 
.../v1/google/cloud/aiplatform_v1/py.typed | 2 - .../cloud/aiplatform_v1/services/__init__.py | 15 - .../services/dataset_service/__init__.py | 22 - .../services/dataset_service/async_client.py | 1083 -- .../services/dataset_service/client.py | 1308 --- .../services/dataset_service/pagers.py | 387 - .../dataset_service/transports/__init__.py | 33 - .../dataset_service/transports/base.py | 282 - .../dataset_service/transports/grpc.py | 511 - .../transports/grpc_asyncio.py | 515 - .../services/endpoint_service/__init__.py | 22 - .../services/endpoint_service/async_client.py | 889 -- .../services/endpoint_service/client.py | 1112 -- .../services/endpoint_service/pagers.py | 141 - .../endpoint_service/transports/__init__.py | 33 - .../endpoint_service/transports/base.py | 239 - .../endpoint_service/transports/grpc.py | 434 - .../transports/grpc_asyncio.py | 438 - .../__init__.py | 22 - .../async_client.py | 332 - .../client.py | 530 - .../transports/__init__.py | 33 - .../transports/base.py | 160 - .../transports/grpc.py | 285 - .../transports/grpc_asyncio.py | 289 - .../services/featurestore_service/__init__.py | 22 - .../featurestore_service/async_client.py | 2222 ---- .../services/featurestore_service/client.py | 2438 ----- .../services/featurestore_service/pagers.py | 509 - .../transports/__init__.py | 33 - .../featurestore_service/transports/base.py | 424 - .../featurestore_service/transports/grpc.py | 805 -- .../transports/grpc_asyncio.py | 809 -- .../index_endpoint_service/__init__.py | 22 - .../index_endpoint_service/async_client.py | 925 -- .../services/index_endpoint_service/client.py | 1132 -- .../services/index_endpoint_service/pagers.py | 141 - .../transports/__init__.py | 33 - .../index_endpoint_service/transports/base.py | 253 - .../index_endpoint_service/transports/grpc.py | 462 - .../transports/grpc_asyncio.py | 466 - .../services/index_service/__init__.py | 22 - .../services/index_service/async_client.py | 640 -- .../services/index_service/client.py | 
847 -- .../services/index_service/pagers.py | 141 - .../index_service/transports/__init__.py | 33 - .../services/index_service/transports/base.py | 210 - .../services/index_service/transports/grpc.py | 381 - .../index_service/transports/grpc_asyncio.py | 385 - .../services/job_service/__init__.py | 22 - .../services/job_service/async_client.py | 2649 ----- .../services/job_service/client.py | 2937 ----- .../services/job_service/pagers.py | 756 -- .../job_service/transports/__init__.py | 33 - .../services/job_service/transports/base.py | 542 - .../services/job_service/transports/grpc.py | 1045 -- .../job_service/transports/grpc_asyncio.py | 1049 -- .../services/metadata_service/__init__.py | 22 - .../services/metadata_service/async_client.py | 2980 ----- .../services/metadata_service/client.py | 3214 ------ .../services/metadata_service/pagers.py | 633 -- .../metadata_service/transports/__init__.py | 33 - .../metadata_service/transports/base.py | 583 - .../metadata_service/transports/grpc.py | 1084 -- .../transports/grpc_asyncio.py | 1088 -- .../services/migration_service/__init__.py | 22 - .../migration_service/async_client.py | 383 - .../services/migration_service/client.py | 635 -- .../services/migration_service/pagers.py | 141 - .../migration_service/transports/__init__.py | 33 - .../migration_service/transports/base.py | 167 - .../migration_service/transports/grpc.py | 305 - .../transports/grpc_asyncio.py | 309 - .../services/model_service/__init__.py | 22 - .../services/model_service/async_client.py | 1071 -- .../services/model_service/client.py | 1305 --- .../services/model_service/pagers.py | 387 - .../model_service/transports/__init__.py | 33 - .../services/model_service/transports/base.py | 283 - .../services/model_service/transports/grpc.py | 521 - .../model_service/transports/grpc_asyncio.py | 525 - .../services/pipeline_service/__init__.py | 22 - .../services/pipeline_service/async_client.py | 1076 -- .../services/pipeline_service/client.py | 1346 --- 
.../services/pipeline_service/pagers.py | 264 - .../pipeline_service/transports/__init__.py | 33 - .../pipeline_service/transports/base.py | 284 - .../pipeline_service/transports/grpc.py | 541 - .../transports/grpc_asyncio.py | 545 - .../services/prediction_service/__init__.py | 22 - .../prediction_service/async_client.py | 574 - .../services/prediction_service/client.py | 781 -- .../prediction_service/transports/__init__.py | 33 - .../prediction_service/transports/base.py | 175 - .../prediction_service/transports/grpc.py | 328 - .../transports/grpc_asyncio.py | 332 - .../specialist_pool_service/__init__.py | 22 - .../specialist_pool_service/async_client.py | 658 -- .../specialist_pool_service/client.py | 856 -- .../specialist_pool_service/pagers.py | 141 - .../transports/__init__.py | 33 - .../transports/base.py | 210 - .../transports/grpc.py | 384 - .../transports/grpc_asyncio.py | 388 - .../services/tensorboard_service/__init__.py | 22 - .../tensorboard_service/async_client.py | 2711 ----- .../services/tensorboard_service/client.py | 2936 ----- .../services/tensorboard_service/pagers.py | 633 -- .../transports/__init__.py | 33 - .../tensorboard_service/transports/base.py | 539 - .../tensorboard_service/transports/grpc.py | 1005 -- .../transports/grpc_asyncio.py | 1009 -- .../services/vizier_service/__init__.py | 22 - .../services/vizier_service/async_client.py | 1292 --- .../services/vizier_service/client.py | 1513 --- .../services/vizier_service/pagers.py | 263 - .../vizier_service/transports/__init__.py | 33 - .../vizier_service/transports/base.py | 352 - .../vizier_service/transports/grpc.py | 659 -- .../vizier_service/transports/grpc_asyncio.py | 663 -- .../cloud/aiplatform_v1/types/__init__.py | 999 -- .../aiplatform_v1/types/accelerator_type.py | 38 - .../cloud/aiplatform_v1/types/annotation.py | 129 - .../aiplatform_v1/types/annotation_spec.py | 78 - .../cloud/aiplatform_v1/types/artifact.py | 153 - .../types/batch_prediction_job.py | 501 - 
.../aiplatform_v1/types/completion_stats.py | 63 - .../cloud/aiplatform_v1/types/context.py | 136 - .../cloud/aiplatform_v1/types/custom_job.py | 455 - .../cloud/aiplatform_v1/types/data_item.py | 101 - .../aiplatform_v1/types/data_labeling_job.py | 350 - .../cloud/aiplatform_v1/types/dataset.py | 237 - .../aiplatform_v1/types/dataset_service.py | 543 - .../aiplatform_v1/types/deployed_index_ref.py | 49 - .../aiplatform_v1/types/deployed_model_ref.py | 48 - .../aiplatform_v1/types/encryption_spec.py | 47 - .../cloud/aiplatform_v1/types/endpoint.py | 372 - .../aiplatform_v1/types/endpoint_service.py | 409 - .../cloud/aiplatform_v1/types/entity_type.py | 102 - .../cloud/aiplatform_v1/types/env_var.py | 56 - .../google/cloud/aiplatform_v1/types/event.py | 93 - .../cloud/aiplatform_v1/types/execution.py | 149 - .../cloud/aiplatform_v1/types/explanation.py | 719 -- .../types/explanation_metadata.py | 460 - .../cloud/aiplatform_v1/types/feature.py | 120 - .../types/feature_monitoring_stats.py | 124 - .../aiplatform_v1/types/feature_selector.py | 62 - .../cloud/aiplatform_v1/types/featurestore.py | 137 - .../types/featurestore_online_service.py | 382 - .../types/featurestore_service.py | 1653 --- .../types/hyperparameter_tuning_job.py | 182 - .../google/cloud/aiplatform_v1/types/index.py | 142 - .../aiplatform_v1/types/index_endpoint.py | 371 - .../types/index_endpoint_service.py | 419 - .../aiplatform_v1/types/index_service.py | 362 - .../v1/google/cloud/aiplatform_v1/types/io.py | 198 - .../cloud/aiplatform_v1/types/job_service.py | 1093 -- .../cloud/aiplatform_v1/types/job_state.py | 41 - .../aiplatform_v1/types/lineage_subgraph.py | 62 - .../aiplatform_v1/types/machine_resources.py | 310 - .../types/manual_batch_tuning_parameters.py | 50 - .../aiplatform_v1/types/metadata_schema.py | 96 - .../aiplatform_v1/types/metadata_service.py | 1477 --- .../aiplatform_v1/types/metadata_store.py | 100 - .../types/migratable_resource.py | 228 - 
.../aiplatform_v1/types/migration_service.py | 479 - .../google/cloud/aiplatform_v1/types/model.py | 754 -- .../types/model_deployment_monitoring_job.py | 441 - .../aiplatform_v1/types/model_evaluation.py | 99 - .../types/model_evaluation_slice.py | 110 - .../aiplatform_v1/types/model_monitoring.py | 398 - .../aiplatform_v1/types/model_service.py | 585 - .../cloud/aiplatform_v1/types/operation.py | 83 - .../cloud/aiplatform_v1/types/pipeline_job.py | 518 - .../aiplatform_v1/types/pipeline_service.py | 412 - .../aiplatform_v1/types/pipeline_state.py | 40 - .../aiplatform_v1/types/prediction_service.py | 273 - .../aiplatform_v1/types/specialist_pool.py | 86 - .../types/specialist_pool_service.py | 237 - .../google/cloud/aiplatform_v1/types/study.py | 811 -- .../cloud/aiplatform_v1/types/tensorboard.py | 131 - .../aiplatform_v1/types/tensorboard_data.py | 205 - .../types/tensorboard_experiment.py | 115 - .../aiplatform_v1/types/tensorboard_run.py | 112 - .../types/tensorboard_service.py | 1224 --- .../types/tensorboard_time_series.py | 153 - .../aiplatform_v1/types/training_pipeline.py | 632 -- .../google/cloud/aiplatform_v1/types/types.py | 86 - .../types/user_action_reference.py | 75 - .../google/cloud/aiplatform_v1/types/value.py | 69 - .../aiplatform_v1/types/vizier_service.py | 589 - owl-bot-staging/v1/mypy.ini | 3 - owl-bot-staging/v1/noxfile.py | 132 - .../scripts/fixup_aiplatform_v1_keywords.py | 359 - .../scripts/fixup_definition_v1_keywords.py | 175 - .../v1/scripts/fixup_instance_v1_keywords.py | 175 - .../v1/scripts/fixup_params_v1_keywords.py | 175 - .../scripts/fixup_prediction_v1_keywords.py | 175 - owl-bot-staging/v1/setup.py | 54 - owl-bot-staging/v1/tests/__init__.py | 16 - owl-bot-staging/v1/tests/unit/__init__.py | 16 - .../v1/tests/unit/gapic/__init__.py | 16 - .../unit/gapic/aiplatform_v1/__init__.py | 16 - .../aiplatform_v1/test_dataset_service.py | 4030 ------- .../aiplatform_v1/test_endpoint_service.py | 2977 ----- 
...est_featurestore_online_serving_service.py | 1382 --- .../test_featurestore_service.py | 6638 ----------- .../test_index_endpoint_service.py | 3138 ------ .../gapic/aiplatform_v1/test_index_service.py | 2381 ---- .../gapic/aiplatform_v1/test_job_service.py | 9153 ---------------- .../aiplatform_v1/test_metadata_service.py | 9706 ----------------- .../aiplatform_v1/test_migration_service.py | 1748 --- .../gapic/aiplatform_v1/test_model_service.py | 4088 ------- .../aiplatform_v1/test_pipeline_service.py | 3953 ------- .../aiplatform_v1/test_prediction_service.py | 1734 --- .../test_specialist_pool_service.py | 2361 ---- .../aiplatform_v1/test_tensorboard_service.py | 8865 --------------- .../aiplatform_v1/test_vizier_service.py | 4630 -------- .../unit/gapic/definition_v1/__init__.py | 16 - .../tests/unit/gapic/instance_v1/__init__.py | 16 - .../v1/tests/unit/gapic/params_v1/__init__.py | 16 - .../unit/gapic/prediction_v1/__init__.py | 16 - owl-bot-staging/v1beta1/.coveragerc | 17 - owl-bot-staging/v1beta1/MANIFEST.in | 2 - owl-bot-staging/v1beta1/README.rst | 49 - .../aiplatform_v1beta1/dataset_service.rst | 10 - .../aiplatform_v1beta1/endpoint_service.rst | 10 - .../featurestore_online_serving_service.rst | 6 - .../featurestore_service.rst | 10 - .../index_endpoint_service.rst | 10 - .../docs/aiplatform_v1beta1/index_service.rst | 10 - .../docs/aiplatform_v1beta1/job_service.rst | 10 - .../aiplatform_v1beta1/metadata_service.rst | 10 - .../aiplatform_v1beta1/migration_service.rst | 10 - .../docs/aiplatform_v1beta1/model_service.rst | 10 - .../aiplatform_v1beta1/pipeline_service.rst | 10 - .../aiplatform_v1beta1/prediction_service.rst | 6 - .../docs/aiplatform_v1beta1/services.rst | 20 - .../specialist_pool_service.rst | 10 - .../tensorboard_service.rst | 10 - .../v1beta1/docs/aiplatform_v1beta1/types.rst | 7 - .../aiplatform_v1beta1/vizier_service.rst | 10 - owl-bot-staging/v1beta1/docs/conf.py | 376 - .../docs/definition_v1beta1/services.rst | 4 - 
.../v1beta1/docs/definition_v1beta1/types.rst | 7 - owl-bot-staging/v1beta1/docs/index.rst | 7 - .../docs/instance_v1beta1/services.rst | 4 - .../v1beta1/docs/instance_v1beta1/types.rst | 7 - .../v1beta1/docs/params_v1beta1/services.rst | 4 - .../v1beta1/docs/params_v1beta1/types.rst | 7 - .../docs/prediction_v1beta1/services.rst | 4 - .../v1beta1/docs/prediction_v1beta1/types.rst | 7 - .../google/cloud/aiplatform/__init__.py | 931 -- .../v1beta1/google/cloud/aiplatform/py.typed | 2 - .../schema/predict/instance/__init__.py | 37 - .../v1beta1/schema/predict/instance/py.typed | 2 - .../predict/instance_v1beta1/__init__.py | 38 - .../instance_v1beta1/gapic_metadata.json | 7 - .../schema/predict/instance_v1beta1/py.typed | 2 - .../instance_v1beta1/services/__init__.py | 15 - .../instance_v1beta1/types/__init__.py | 54 - .../types/image_classification.py | 56 - .../types/image_object_detection.py | 56 - .../types/image_segmentation.py | 50 - .../types/text_classification.py | 49 - .../instance_v1beta1/types/text_extraction.py | 62 - .../instance_v1beta1/types/text_sentiment.py | 49 - .../types/video_action_recognition.py | 73 - .../types/video_classification.py | 73 - .../types/video_object_tracking.py | 73 - .../v1beta1/schema/predict/params/__init__.py | 31 - .../v1beta1/schema/predict/params/py.typed | 2 - .../schema/predict/params_v1beta1/__init__.py | 32 - .../params_v1beta1/gapic_metadata.json | 7 - .../schema/predict/params_v1beta1/py.typed | 2 - .../params_v1beta1/services/__init__.py | 15 - .../predict/params_v1beta1/types/__init__.py | 42 - .../types/image_classification.py | 52 - .../types/image_object_detection.py | 53 - .../types/image_segmentation.py | 45 - .../types/video_action_recognition.py | 53 - .../types/video_classification.py | 95 - .../types/video_object_tracking.py | 61 - .../schema/predict/prediction/__init__.py | 41 - .../schema/predict/prediction/py.typed | 2 - .../predict/prediction_v1beta1/__init__.py | 42 - 
.../prediction_v1beta1/gapic_metadata.json | 7 - .../predict/prediction_v1beta1/py.typed | 2 - .../prediction_v1beta1/services/__init__.py | 15 - .../prediction_v1beta1/types/__init__.py | 62 - .../types/classification.py | 57 - .../types/image_object_detection.py | 73 - .../types/image_segmentation.py | 62 - .../types/tabular_classification.py | 52 - .../types/tabular_regression.py | 53 - .../types/text_extraction.py | 78 - .../types/text_sentiment.py | 48 - .../types/time_series_forecasting.py | 41 - .../types/video_action_recognition.py | 85 - .../types/video_classification.py | 103 - .../types/video_object_tracking.py | 145 - .../schema/trainingjob/definition/__init__.py | 75 - .../schema/trainingjob/definition/py.typed | 2 - .../definition_v1beta1/__init__.py | 76 - .../definition_v1beta1/gapic_metadata.json | 7 - .../trainingjob/definition_v1beta1/py.typed | 2 - .../definition_v1beta1/services/__init__.py | 15 - .../definition_v1beta1/types/__init__.py | 98 - .../types/automl_image_classification.py | 158 - .../types/automl_image_object_detection.py | 139 - .../types/automl_image_segmentation.py | 133 - .../definition_v1beta1/types/automl_tables.py | 529 - .../types/automl_text_classification.py | 58 - .../types/automl_text_extraction.py | 49 - .../types/automl_text_sentiment.py | 67 - .../types/automl_time_series_forecasting.py | 493 - .../types/automl_video_action_recognition.py | 66 - .../types/automl_video_classification.py | 65 - .../types/automl_video_object_tracking.py | 68 - .../export_evaluated_data_items_config.py | 57 - .../cloud/aiplatform_v1beta1/__init__.py | 932 -- .../aiplatform_v1beta1/gapic_metadata.json | 2059 ---- .../google/cloud/aiplatform_v1beta1/py.typed | 2 - .../aiplatform_v1beta1/services/__init__.py | 15 - .../services/dataset_service/__init__.py | 22 - .../services/dataset_service/async_client.py | 1083 -- .../services/dataset_service/client.py | 1308 --- .../services/dataset_service/pagers.py | 387 - 
.../dataset_service/transports/__init__.py | 33 - .../dataset_service/transports/base.py | 282 - .../dataset_service/transports/grpc.py | 511 - .../transports/grpc_asyncio.py | 515 - .../services/endpoint_service/__init__.py | 22 - .../services/endpoint_service/async_client.py | 889 -- .../services/endpoint_service/client.py | 1112 -- .../services/endpoint_service/pagers.py | 141 - .../endpoint_service/transports/__init__.py | 33 - .../endpoint_service/transports/base.py | 239 - .../endpoint_service/transports/grpc.py | 434 - .../transports/grpc_asyncio.py | 438 - .../__init__.py | 22 - .../async_client.py | 332 - .../client.py | 530 - .../transports/__init__.py | 33 - .../transports/base.py | 160 - .../transports/grpc.py | 285 - .../transports/grpc_asyncio.py | 289 - .../services/featurestore_service/__init__.py | 22 - .../featurestore_service/async_client.py | 2224 ---- .../services/featurestore_service/client.py | 2440 ----- .../services/featurestore_service/pagers.py | 509 - .../transports/__init__.py | 33 - .../featurestore_service/transports/base.py | 424 - .../featurestore_service/transports/grpc.py | 805 -- .../transports/grpc_asyncio.py | 809 -- .../index_endpoint_service/__init__.py | 22 - .../index_endpoint_service/async_client.py | 925 -- .../services/index_endpoint_service/client.py | 1132 -- .../services/index_endpoint_service/pagers.py | 141 - .../transports/__init__.py | 33 - .../index_endpoint_service/transports/base.py | 253 - .../index_endpoint_service/transports/grpc.py | 462 - .../transports/grpc_asyncio.py | 466 - .../services/index_service/__init__.py | 22 - .../services/index_service/async_client.py | 640 -- .../services/index_service/client.py | 847 -- .../services/index_service/pagers.py | 141 - .../index_service/transports/__init__.py | 33 - .../services/index_service/transports/base.py | 210 - .../services/index_service/transports/grpc.py | 381 - .../index_service/transports/grpc_asyncio.py | 385 - .../services/job_service/__init__.py | 
22 - .../services/job_service/async_client.py | 2649 ----- .../services/job_service/client.py | 2937 ----- .../services/job_service/pagers.py | 756 -- .../job_service/transports/__init__.py | 33 - .../services/job_service/transports/base.py | 542 - .../services/job_service/transports/grpc.py | 1045 -- .../job_service/transports/grpc_asyncio.py | 1049 -- .../services/metadata_service/__init__.py | 22 - .../services/metadata_service/async_client.py | 2980 ----- .../services/metadata_service/client.py | 3214 ------ .../services/metadata_service/pagers.py | 633 -- .../metadata_service/transports/__init__.py | 33 - .../metadata_service/transports/base.py | 583 - .../metadata_service/transports/grpc.py | 1084 -- .../transports/grpc_asyncio.py | 1088 -- .../services/migration_service/__init__.py | 22 - .../migration_service/async_client.py | 383 - .../services/migration_service/client.py | 635 -- .../services/migration_service/pagers.py | 141 - .../migration_service/transports/__init__.py | 33 - .../migration_service/transports/base.py | 167 - .../migration_service/transports/grpc.py | 305 - .../transports/grpc_asyncio.py | 309 - .../services/model_service/__init__.py | 22 - .../services/model_service/async_client.py | 1072 -- .../services/model_service/client.py | 1306 --- .../services/model_service/pagers.py | 387 - .../model_service/transports/__init__.py | 33 - .../services/model_service/transports/base.py | 283 - .../services/model_service/transports/grpc.py | 522 - .../model_service/transports/grpc_asyncio.py | 526 - .../services/pipeline_service/__init__.py | 22 - .../services/pipeline_service/async_client.py | 1076 -- .../services/pipeline_service/client.py | 1346 --- .../services/pipeline_service/pagers.py | 264 - .../pipeline_service/transports/__init__.py | 33 - .../pipeline_service/transports/base.py | 284 - .../pipeline_service/transports/grpc.py | 541 - .../transports/grpc_asyncio.py | 545 - .../services/prediction_service/__init__.py | 22 - 
.../prediction_service/async_client.py | 574 - .../services/prediction_service/client.py | 781 -- .../prediction_service/transports/__init__.py | 33 - .../prediction_service/transports/base.py | 175 - .../prediction_service/transports/grpc.py | 328 - .../transports/grpc_asyncio.py | 332 - .../specialist_pool_service/__init__.py | 22 - .../specialist_pool_service/async_client.py | 658 -- .../specialist_pool_service/client.py | 856 -- .../specialist_pool_service/pagers.py | 141 - .../transports/__init__.py | 33 - .../transports/base.py | 210 - .../transports/grpc.py | 384 - .../transports/grpc_asyncio.py | 388 - .../services/tensorboard_service/__init__.py | 22 - .../tensorboard_service/async_client.py | 2711 ----- .../services/tensorboard_service/client.py | 2936 ----- .../services/tensorboard_service/pagers.py | 633 -- .../transports/__init__.py | 33 - .../tensorboard_service/transports/base.py | 538 - .../tensorboard_service/transports/grpc.py | 1005 -- .../transports/grpc_asyncio.py | 1009 -- .../services/vizier_service/__init__.py | 22 - .../services/vizier_service/async_client.py | 1292 --- .../services/vizier_service/client.py | 1513 --- .../services/vizier_service/pagers.py | 263 - .../vizier_service/transports/__init__.py | 33 - .../vizier_service/transports/base.py | 352 - .../vizier_service/transports/grpc.py | 659 -- .../vizier_service/transports/grpc_asyncio.py | 663 -- .../aiplatform_v1beta1/types/__init__.py | 1005 -- .../types/accelerator_type.py | 38 - .../aiplatform_v1beta1/types/annotation.py | 129 - .../types/annotation_spec.py | 78 - .../aiplatform_v1beta1/types/artifact.py | 153 - .../types/batch_prediction_job.py | 502 - .../types/completion_stats.py | 63 - .../cloud/aiplatform_v1beta1/types/context.py | 136 - .../aiplatform_v1beta1/types/custom_job.py | 438 - .../aiplatform_v1beta1/types/data_item.py | 101 - .../types/data_labeling_job.py | 350 - .../cloud/aiplatform_v1beta1/types/dataset.py | 237 - .../types/dataset_service.py | 543 - 
.../types/deployed_index_ref.py | 49 - .../types/deployed_model_ref.py | 48 - .../types/encryption_spec.py | 47 - .../aiplatform_v1beta1/types/endpoint.py | 371 - .../types/endpoint_service.py | 395 - .../aiplatform_v1beta1/types/entity_type.py | 118 - .../cloud/aiplatform_v1beta1/types/env_var.py | 56 - .../cloud/aiplatform_v1beta1/types/event.py | 93 - .../aiplatform_v1beta1/types/execution.py | 149 - .../aiplatform_v1beta1/types/explanation.py | 760 -- .../types/explanation_metadata.py | 460 - .../cloud/aiplatform_v1beta1/types/feature.py | 153 - .../types/feature_monitoring_stats.py | 124 - .../types/feature_selector.py | 62 - .../aiplatform_v1beta1/types/featurestore.py | 137 - .../types/featurestore_monitoring.py | 94 - .../types/featurestore_online_service.py | 382 - .../types/featurestore_service.py | 1653 --- .../types/hyperparameter_tuning_job.py | 182 - .../cloud/aiplatform_v1beta1/types/index.py | 142 - .../types/index_endpoint.py | 371 - .../types/index_endpoint_service.py | 419 - .../aiplatform_v1beta1/types/index_service.py | 362 - .../cloud/aiplatform_v1beta1/types/io.py | 198 - .../aiplatform_v1beta1/types/job_service.py | 1093 -- .../aiplatform_v1beta1/types/job_state.py | 41 - .../types/lineage_subgraph.py | 62 - .../types/machine_resources.py | 310 - .../types/manual_batch_tuning_parameters.py | 50 - .../types/metadata_schema.py | 96 - .../types/metadata_service.py | 1479 --- .../types/metadata_store.py | 100 - .../types/migratable_resource.py | 228 - .../types/migration_service.py | 479 - .../cloud/aiplatform_v1beta1/types/model.py | 754 -- .../types/model_deployment_monitoring_job.py | 441 - .../types/model_evaluation.py | 134 - .../types/model_evaluation_slice.py | 110 - .../types/model_monitoring.py | 398 - .../aiplatform_v1beta1/types/model_service.py | 571 - .../aiplatform_v1beta1/types/operation.py | 83 - .../aiplatform_v1beta1/types/pipeline_job.py | 518 - .../types/pipeline_service.py | 412 - .../types/pipeline_state.py | 40 - 
.../types/prediction_service.py | 276 - .../types/specialist_pool.py | 86 - .../types/specialist_pool_service.py | 237 - .../cloud/aiplatform_v1beta1/types/study.py | 882 -- .../aiplatform_v1beta1/types/tensorboard.py | 131 - .../types/tensorboard_data.py | 205 - .../types/tensorboard_experiment.py | 115 - .../types/tensorboard_run.py | 112 - .../types/tensorboard_service.py | 1224 --- .../types/tensorboard_time_series.py | 153 - .../types/training_pipeline.py | 633 -- .../cloud/aiplatform_v1beta1/types/types.py | 86 - .../types/user_action_reference.py | 75 - .../cloud/aiplatform_v1beta1/types/value.py | 69 - .../types/vizier_service.py | 589 - owl-bot-staging/v1beta1/mypy.ini | 3 - owl-bot-staging/v1beta1/noxfile.py | 132 - .../fixup_aiplatform_v1beta1_keywords.py | 359 - .../fixup_definition_v1beta1_keywords.py | 175 - .../fixup_instance_v1beta1_keywords.py | 175 - .../scripts/fixup_params_v1beta1_keywords.py | 175 - .../fixup_prediction_v1beta1_keywords.py | 175 - owl-bot-staging/v1beta1/setup.py | 54 - owl-bot-staging/v1beta1/tests/__init__.py | 16 - .../v1beta1/tests/unit/__init__.py | 16 - .../v1beta1/tests/unit/gapic/__init__.py | 16 - .../unit/gapic/aiplatform_v1beta1/__init__.py | 16 - .../test_dataset_service.py | 4030 ------- .../test_endpoint_service.py | 2978 ----- ...est_featurestore_online_serving_service.py | 1382 --- .../test_featurestore_service.py | 6641 ----------- .../test_index_endpoint_service.py | 3138 ------ .../aiplatform_v1beta1/test_index_service.py | 2381 ---- .../aiplatform_v1beta1/test_job_service.py | 9153 ---------------- .../test_metadata_service.py | 9706 ----------------- .../test_migration_service.py | 1748 --- .../aiplatform_v1beta1/test_model_service.py | 4088 ------- .../test_pipeline_service.py | 3953 ------- .../test_prediction_service.py | 1735 --- .../test_specialist_pool_service.py | 2361 ---- .../test_tensorboard_service.py | 8862 --------------- .../aiplatform_v1beta1/test_vizier_service.py | 4630 -------- 
.../unit/gapic/definition_v1beta1/__init__.py | 16 - .../unit/gapic/instance_v1beta1/__init__.py | 16 - .../unit/gapic/params_v1beta1/__init__.py | 16 - .../unit/gapic/prediction_v1beta1/__init__.py | 16 - .../aiplatform_v1/test_endpoint_service.py | 24 +- .../test_featurestore_service.py | 36 +- .../test_index_endpoint_service.py | 255 + .../gapic/aiplatform_v1/test_job_service.py | 2 + .../aiplatform_v1/test_migration_service.py | 28 +- .../test_endpoint_service.py | 24 +- .../test_featurestore_service.py | 36 +- .../test_index_endpoint_service.py | 255 + .../aiplatform_v1beta1/test_job_service.py | 3 + 752 files changed, 2489 insertions(+), 339382 deletions(-) rename {owl-bot-staging/v1/google => google}/cloud/aiplatform_v1/types/unmanaged_container_model.py (82%) rename {owl-bot-staging/v1beta1/google => google}/cloud/aiplatform_v1beta1/types/unmanaged_container_model.py (81%) delete mode 100644 owl-bot-staging/v1/.coveragerc delete mode 100644 owl-bot-staging/v1/MANIFEST.in delete mode 100644 owl-bot-staging/v1/README.rst delete mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/dataset_service.rst delete mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/endpoint_service.rst delete mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/featurestore_online_serving_service.rst delete mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/featurestore_service.rst delete mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/index_endpoint_service.rst delete mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/index_service.rst delete mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/job_service.rst delete mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/metadata_service.rst delete mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/migration_service.rst delete mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/model_service.rst delete mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/pipeline_service.rst delete mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/prediction_service.rst 
delete mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/services.rst delete mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/specialist_pool_service.rst delete mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/tensorboard_service.rst delete mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/types.rst delete mode 100644 owl-bot-staging/v1/docs/aiplatform_v1/vizier_service.rst delete mode 100644 owl-bot-staging/v1/docs/conf.py delete mode 100644 owl-bot-staging/v1/docs/definition_v1/services.rst delete mode 100644 owl-bot-staging/v1/docs/definition_v1/types.rst delete mode 100644 owl-bot-staging/v1/docs/index.rst delete mode 100644 owl-bot-staging/v1/docs/instance_v1/services.rst delete mode 100644 owl-bot-staging/v1/docs/instance_v1/types.rst delete mode 100644 owl-bot-staging/v1/docs/params_v1/services.rst delete mode 100644 owl-bot-staging/v1/docs/params_v1/types.rst delete mode 100644 owl-bot-staging/v1/docs/prediction_v1/services.rst delete mode 100644 owl-bot-staging/v1/docs/prediction_v1/types.rst delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/py.typed delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/py.typed delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/services/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py delete mode 100644 
owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/py.typed delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/services/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py delete mode 100644 
owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/services/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py delete mode 100644 
owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/py.typed delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/py.typed delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/services/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_classification.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_segmentation.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py delete mode 100644 
owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_classification.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_extraction.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_sentiment.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_action_recognition.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_classification.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_object_tracking.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/export_evaluated_data_items_config.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/py.typed delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/pagers.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py delete mode 
100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/__init__.py delete mode 100644 
owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/pagers.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/pagers.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/client.py delete mode 
100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/pagers.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/pagers.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/pagers.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py delete mode 
100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/pagers.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/pagers.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py delete mode 100644 
owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py delete mode 100644 
owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/pagers.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/pagers.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/async_client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/client.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/pagers.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/transports/__init__.py delete mode 100644 
owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/transports/base.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/__init__.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/accelerator_type.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation_spec.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/artifact.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/batch_prediction_job.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/completion_stats.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/context.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/custom_job.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_item.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_labeling_job.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset_service.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/deployed_index_ref.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/deployed_model_ref.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/encryption_spec.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint_service.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/entity_type.py delete mode 
100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/env_var.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/event.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/execution.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/explanation.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/explanation_metadata.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/feature.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/feature_monitoring_stats.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/feature_selector.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore_online_service.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore_service.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index_endpoint.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index_endpoint_service.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index_service.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/io.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_service.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_state.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/lineage_subgraph.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/machine_resources.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/manual_batch_tuning_parameters.py delete mode 100644 
owl-bot-staging/v1/google/cloud/aiplatform_v1/types/metadata_schema.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/metadata_service.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/metadata_store.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/migratable_resource.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/migration_service.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation_slice.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_monitoring.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_service.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/operation.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_job.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_service.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_state.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/prediction_service.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool_service.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/study.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_data.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_experiment.py delete mode 100644 
owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_run.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_service.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_time_series.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/training_pipeline.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/types.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/user_action_reference.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/value.py delete mode 100644 owl-bot-staging/v1/google/cloud/aiplatform_v1/types/vizier_service.py delete mode 100644 owl-bot-staging/v1/mypy.ini delete mode 100644 owl-bot-staging/v1/noxfile.py delete mode 100644 owl-bot-staging/v1/scripts/fixup_aiplatform_v1_keywords.py delete mode 100644 owl-bot-staging/v1/scripts/fixup_definition_v1_keywords.py delete mode 100644 owl-bot-staging/v1/scripts/fixup_instance_v1_keywords.py delete mode 100644 owl-bot-staging/v1/scripts/fixup_params_v1_keywords.py delete mode 100644 owl-bot-staging/v1/scripts/fixup_prediction_v1_keywords.py delete mode 100644 owl-bot-staging/v1/setup.py delete mode 100644 owl-bot-staging/v1/tests/__init__.py delete mode 100644 owl-bot-staging/v1/tests/unit/__init__.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/__init__.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/__init__.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_dataset_service.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py delete 
mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_index_service.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_job_service.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_metadata_service.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_migration_service.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_model_service.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_pipeline_service.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_prediction_service.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_vizier_service.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/definition_v1/__init__.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/instance_v1/__init__.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/params_v1/__init__.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/prediction_v1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/.coveragerc delete mode 100644 owl-bot-staging/v1beta1/MANIFEST.in delete mode 100644 owl-bot-staging/v1beta1/README.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/dataset_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/endpoint_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_online_serving_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_endpoint_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_service.rst delete mode 100644 
owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/job_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/metadata_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/migration_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/model_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/pipeline_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/prediction_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/services.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/specialist_pool_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/tensorboard_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/types.rst delete mode 100644 owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/vizier_service.rst delete mode 100644 owl-bot-staging/v1beta1/docs/conf.py delete mode 100644 owl-bot-staging/v1beta1/docs/definition_v1beta1/services.rst delete mode 100644 owl-bot-staging/v1beta1/docs/definition_v1beta1/types.rst delete mode 100644 owl-bot-staging/v1beta1/docs/index.rst delete mode 100644 owl-bot-staging/v1beta1/docs/instance_v1beta1/services.rst delete mode 100644 owl-bot-staging/v1beta1/docs/instance_v1beta1/types.rst delete mode 100644 owl-bot-staging/v1beta1/docs/params_v1beta1/services.rst delete mode 100644 owl-bot-staging/v1beta1/docs/params_v1beta1/types.rst delete mode 100644 owl-bot-staging/v1beta1/docs/prediction_v1beta1/services.rst delete mode 100644 owl-bot-staging/v1beta1/docs/prediction_v1beta1/types.rst delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/py.typed delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py delete mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/services/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py delete mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/services/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py delete mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/services/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py delete mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/py.typed delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/py.typed delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/services/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py delete mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/py.typed delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py delete mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py delete 
mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py delete mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py delete mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/__init__.py delete mode 
100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py delete mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py delete mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/accelerator_type.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation_spec.py delete mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/artifact.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/completion_stats.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/context.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/custom_job.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_item.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_model_ref.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/encryption_spec.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/entity_type.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/env_var.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/event.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/execution.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py delete mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_monitoring_stats.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_selector.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_monitoring.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/io.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_state.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/lineage_subgraph.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/machine_resources.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/manual_batch_tuning_parameters.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_schema.py delete mode 100644 
owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_store.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migratable_resource.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migration_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_monitoring.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/operation.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_job.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_state.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/prediction_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/study.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_data.py delete 
mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_run.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_time_series.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/training_pipeline.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/types.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/user_action_reference.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/value.py delete mode 100644 owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/vizier_service.py delete mode 100644 owl-bot-staging/v1beta1/mypy.ini delete mode 100644 owl-bot-staging/v1beta1/noxfile.py delete mode 100644 owl-bot-staging/v1beta1/scripts/fixup_aiplatform_v1beta1_keywords.py delete mode 100644 owl-bot-staging/v1beta1/scripts/fixup_definition_v1beta1_keywords.py delete mode 100644 owl-bot-staging/v1beta1/scripts/fixup_instance_v1beta1_keywords.py delete mode 100644 owl-bot-staging/v1beta1/scripts/fixup_params_v1beta1_keywords.py delete mode 100644 owl-bot-staging/v1beta1/scripts/fixup_prediction_v1beta1_keywords.py delete mode 100644 owl-bot-staging/v1beta1/setup.py delete mode 100644 owl-bot-staging/v1beta1/tests/__init__.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/__init__.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py delete mode 
100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/definition_v1beta1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/instance_v1beta1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/params_v1beta1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/prediction_v1beta1/__init__.py diff --git a/google/cloud/aiplatform_v1/__init__.py b/google/cloud/aiplatform_v1/__init__.py index 4f592b0fc3..6ac922497a 100644 --- a/google/cloud/aiplatform_v1/__init__.py +++ b/google/cloud/aiplatform_v1/__init__.py @@ -112,6 +112,7 @@ from .types.event import Event 
from .types.execution import Execution from .types.explanation import Attribution +from .types.explanation import BlurBaselineConfig from .types.explanation import Explanation from .types.explanation import ExplanationMetadataOverride from .types.explanation import ExplanationParameters @@ -187,6 +188,9 @@ from .types.index_endpoint_service import GetIndexEndpointRequest from .types.index_endpoint_service import ListIndexEndpointsRequest from .types.index_endpoint_service import ListIndexEndpointsResponse +from .types.index_endpoint_service import MutateDeployedIndexOperationMetadata +from .types.index_endpoint_service import MutateDeployedIndexRequest +from .types.index_endpoint_service import MutateDeployedIndexResponse from .types.index_endpoint_service import UndeployIndexOperationMetadata from .types.index_endpoint_service import UndeployIndexRequest from .types.index_endpoint_service import UndeployIndexResponse @@ -445,12 +449,14 @@ from .types.training_pipeline import FractionSplit from .types.training_pipeline import InputDataConfig from .types.training_pipeline import PredefinedSplit +from .types.training_pipeline import StratifiedSplit from .types.training_pipeline import TimestampSplit from .types.training_pipeline import TrainingPipeline from .types.types import BoolArray from .types.types import DoubleArray from .types.types import Int64Array from .types.types import StringArray +from .types.unmanaged_container_model import UnmanagedContainerModel from .types.user_action_reference import UserActionReference from .types.value import Value from .types.vizier_service import AddTrialMeasurementRequest @@ -527,6 +533,7 @@ "BatchReadTensorboardTimeSeriesDataResponse", "BigQueryDestination", "BigQuerySource", + "BlurBaselineConfig", "BoolArray", "CancelBatchPredictionJobRequest", "CancelCustomJobRequest", @@ -810,6 +817,9 @@ "ModelMonitoringObjectiveConfig", "ModelMonitoringStatsAnomalies", "ModelServiceClient", + "MutateDeployedIndexOperationMetadata", + 
"MutateDeployedIndexRequest", + "MutateDeployedIndexResponse", "NearestNeighborSearchOperationMetadata", "PauseModelDeploymentMonitoringJobRequest", "PipelineJob", @@ -862,6 +872,7 @@ "SpecialistPool", "SpecialistPoolServiceClient", "StopTrialRequest", + "StratifiedSplit", "StreamingReadFeatureValuesRequest", "StringArray", "Study", @@ -891,6 +902,7 @@ "UndeployModelOperationMetadata", "UndeployModelRequest", "UndeployModelResponse", + "UnmanagedContainerModel", "UpdateArtifactRequest", "UpdateContextRequest", "UpdateDatasetRequest", diff --git a/google/cloud/aiplatform_v1/gapic_metadata.json b/google/cloud/aiplatform_v1/gapic_metadata.json index bb61cffbb0..b7e8b7361a 100644 --- a/google/cloud/aiplatform_v1/gapic_metadata.json +++ b/google/cloud/aiplatform_v1/gapic_metadata.json @@ -481,6 +481,11 @@ "list_index_endpoints" ] }, + "MutateDeployedIndex": { + "methods": [ + "mutate_deployed_index" + ] + }, "UndeployIndex": { "methods": [ "undeploy_index" @@ -521,6 +526,11 @@ "list_index_endpoints" ] }, + "MutateDeployedIndex": { + "methods": [ + "mutate_deployed_index" + ] + }, "UndeployIndex": { "methods": [ "undeploy_index" diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py b/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py index 0ec7562ca7..7ffd118709 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py @@ -191,6 +191,7 @@ async def create_endpoint( *, parent: str = None, endpoint: gca_endpoint.Endpoint = None, + endpoint_id: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -214,6 +215,21 @@ async def create_endpoint( This corresponds to the ``endpoint`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + endpoint_id (:class:`str`): + Immutable. 
The ID to use for endpoint, which will become + the final component of the endpoint resource name. If + not provided, Vertex AI will generate a value for this + ID. + + This value should be 1-10 characters, and valid + characters are /[0-9]/. When using HTTP/JSON, this field + is populated based on a query string argument, such as + ``?endpoint_id=12345``. This is the fallback for fields + that are not included in either the URI or the body. + + This corresponds to the ``endpoint_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -231,7 +247,7 @@ async def create_endpoint( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, endpoint]) + has_flattened_params = any([parent, endpoint, endpoint_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -246,6 +262,8 @@ async def create_endpoint( request.parent = parent if endpoint is not None: request.endpoint = endpoint + if endpoint_id is not None: + request.endpoint_id = endpoint_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/client.py b/google/cloud/aiplatform_v1/services/endpoint_service/client.py index ac8c621838..3f20e28f33 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/client.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/client.py @@ -424,6 +424,7 @@ def create_endpoint( *, parent: str = None, endpoint: gca_endpoint.Endpoint = None, + endpoint_id: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -447,6 +448,21 @@ def create_endpoint( This corresponds to the ``endpoint`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + endpoint_id (str): + Immutable. The ID to use for endpoint, which will become + the final component of the endpoint resource name. If + not provided, Vertex AI will generate a value for this + ID. + + This value should be 1-10 characters, and valid + characters are /[0-9]/. When using HTTP/JSON, this field + is populated based on a query string argument, such as + ``?endpoint_id=12345``. This is the fallback for fields + that are not included in either the URI or the body. + + This corresponds to the ``endpoint_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -464,7 +480,7 @@ def create_endpoint( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, endpoint]) + has_flattened_params = any([parent, endpoint, endpoint_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -483,6 +499,8 @@ def create_endpoint( request.parent = parent if endpoint is not None: request.endpoint = endpoint + if endpoint_id is not None: + request.endpoint_id = endpoint_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py b/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py index 2597a5473b..4f85bb6b07 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py @@ -196,6 +196,7 @@ async def create_featurestore( *, parent: str = None, featurestore: gca_featurestore.Featurestore = None, + featurestore_id: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -220,6 +221,21 @@ async def create_featurestore( This corresponds to the ``featurestore`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + featurestore_id (:class:`str`): + Required. The ID to use for this Featurestore, which + will become the final component of the Featurestore's + resource name. + + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within the project and + location. + + This corresponds to the ``featurestore_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
@@ -230,7 +246,7 @@ async def create_featurestore( google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Featurestore` Vertex Feature Store provides a centralized repository for organizing, + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing, storing, and serving ML features. The Featurestore is a top-level container for your features and their values. @@ -239,7 +255,7 @@ async def create_featurestore( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, featurestore]) + has_flattened_params = any([parent, featurestore, featurestore_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -254,6 +270,8 @@ async def create_featurestore( request.parent = parent if featurestore is not None: request.featurestore = featurestore + if featurestore_id is not None: + request.featurestore_id = featurestore_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -313,7 +331,7 @@ async def get_featurestore( Returns: google.cloud.aiplatform_v1.types.Featurestore: - Vertex Feature Store provides a + Vertex AI Feature Store provides a centralized repository for organizing, storing, and serving ML features. The Featurestore is a top-level container @@ -490,7 +508,7 @@ async def update_featurestore( google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. 
- The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Featurestore` Vertex Feature Store provides a centralized repository for organizing, + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing, storing, and serving ML features. The Featurestore is a top-level container for your features and their values. @@ -659,6 +677,7 @@ async def create_entity_type( *, parent: str = None, entity_type: gca_entity_type.EntityType = None, + entity_type_id: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -682,6 +701,20 @@ async def create_entity_type( This corresponds to the ``entity_type`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + entity_type_id (:class:`str`): + Required. The ID to use for the EntityType, which will + become the final component of the EntityType's resource + name. + + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within a featurestore. + + This corresponds to the ``entity_type_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -701,7 +734,7 @@ async def create_entity_type( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, entity_type]) + has_flattened_params = any([parent, entity_type, entity_type_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -716,6 +749,8 @@ async def create_entity_type( request.parent = parent if entity_type is not None: request.entity_type = entity_type + if entity_type_id is not None: + request.entity_type_id = entity_type_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -1115,6 +1150,7 @@ async def create_feature( *, parent: str = None, feature: gca_feature.Feature = None, + feature_id: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -1138,6 +1174,20 @@ async def create_feature( This corresponds to the ``feature`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + feature_id (:class:`str`): + Required. The ID to use for the Feature, which will + become the final component of the Feature's resource + name. + + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within an EntityType. + + This corresponds to the ``feature_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1156,7 +1206,7 @@ async def create_feature( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, feature]) + has_flattened_params = any([parent, feature, feature_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1171,6 +1221,8 @@ async def create_feature( request.parent = parent if feature is not None: request.feature = feature + if feature_id is not None: + request.feature_id = feature_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/client.py b/google/cloud/aiplatform_v1/services/featurestore_service/client.py index 448e9e97c1..ea6a73b4af 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/client.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/client.py @@ -424,6 +424,7 @@ def create_featurestore( *, parent: str = None, featurestore: gca_featurestore.Featurestore = None, + featurestore_id: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -448,6 +449,21 @@ def create_featurestore( This corresponds to the ``featurestore`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + featurestore_id (str): + Required. The ID to use for this Featurestore, which + will become the final component of the Featurestore's + resource name. + + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within the project and + location. + + This corresponds to the ``featurestore_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
@@ -458,7 +474,7 @@ def create_featurestore( google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Featurestore` Vertex Feature Store provides a centralized repository for organizing, + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing, storing, and serving ML features. The Featurestore is a top-level container for your features and their values. @@ -467,7 +483,7 @@ def create_featurestore( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, featurestore]) + has_flattened_params = any([parent, featurestore, featurestore_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -486,6 +502,8 @@ def create_featurestore( request.parent = parent if featurestore is not None: request.featurestore = featurestore + if featurestore_id is not None: + request.featurestore_id = featurestore_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -541,7 +559,7 @@ def get_featurestore( Returns: google.cloud.aiplatform_v1.types.Featurestore: - Vertex Feature Store provides a + Vertex AI Feature Store provides a centralized repository for organizing, storing, and serving ML features. The Featurestore is a top-level container @@ -718,7 +736,7 @@ def update_featurestore( google.api_core.operation.Operation: An object representing a long-running operation. 
- The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Featurestore` Vertex Feature Store provides a centralized repository for organizing, + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing, storing, and serving ML features. The Featurestore is a top-level container for your features and their values. @@ -887,6 +905,7 @@ def create_entity_type( *, parent: str = None, entity_type: gca_entity_type.EntityType = None, + entity_type_id: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -910,6 +929,20 @@ def create_entity_type( This corresponds to the ``entity_type`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + entity_type_id (str): + Required. The ID to use for the EntityType, which will + become the final component of the EntityType's resource + name. + + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within a featurestore. + + This corresponds to the ``entity_type_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -929,7 +962,7 @@ def create_entity_type( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, entity_type]) + has_flattened_params = any([parent, entity_type, entity_type_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -948,6 +981,8 @@ def create_entity_type( request.parent = parent if entity_type is not None: request.entity_type = entity_type + if entity_type_id is not None: + request.entity_type_id = entity_type_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -1343,6 +1378,7 @@ def create_feature( *, parent: str = None, feature: gca_feature.Feature = None, + feature_id: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -1366,6 +1402,20 @@ def create_feature( This corresponds to the ``feature`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + feature_id (str): + Required. The ID to use for the Feature, which will + become the final component of the Feature's resource + name. + + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within an EntityType. + + This corresponds to the ``feature_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1384,7 +1434,7 @@ def create_feature( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, feature]) + has_flattened_params = any([parent, feature, feature_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1403,6 +1453,8 @@ def create_feature( request.parent = parent if feature is not None: request.feature = feature + if feature_id is not None: + request.feature_id = feature_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py index 51ed2f1cc9..57f3b519f9 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py @@ -798,6 +798,105 @@ async def undeploy_index( # Done; return the response. return response + async def mutate_deployed_index( + self, + request: Union[index_endpoint_service.MutateDeployedIndexRequest, dict] = None, + *, + index_endpoint: str = None, + deployed_index: gca_index_endpoint.DeployedIndex = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Update an existing DeployedIndex under an + IndexEndpoint. + + Args: + request (Union[google.cloud.aiplatform_v1.types.MutateDeployedIndexRequest, dict]): + The request object. Request message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex]. + index_endpoint (:class:`str`): + Required. The name of the IndexEndpoint resource into + which to deploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ deployed_index (:class:`google.cloud.aiplatform_v1.types.DeployedIndex`): + Required. The DeployedIndex to be updated within the + IndexEndpoint. Currently, the updatable fields are + [DeployedIndex][automatic_resources] and + [DeployedIndex][dedicated_resources] + + This corresponds to the ``deployed_index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.MutateDeployedIndexResponse` + Response message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, deployed_index]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = index_endpoint_service.MutateDeployedIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if deployed_index is not None: + request.deployed_index = deployed_index + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.mutate_deployed_index, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("index_endpoint", request.index_endpoint),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + index_endpoint_service.MutateDeployedIndexResponse, + metadata_type=index_endpoint_service.MutateDeployedIndexOperationMetadata, + ) + + # Done; return the response. + return response + async def __aenter__(self): return self diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py index cd76fb9f07..770a7989e4 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py @@ -1003,6 +1003,105 @@ def undeploy_index( # Done; return the response. return response + def mutate_deployed_index( + self, + request: Union[index_endpoint_service.MutateDeployedIndexRequest, dict] = None, + *, + index_endpoint: str = None, + deployed_index: gca_index_endpoint.DeployedIndex = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Update an existing DeployedIndex under an + IndexEndpoint. + + Args: + request (Union[google.cloud.aiplatform_v1.types.MutateDeployedIndexRequest, dict]): + The request object. Request message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex]. 
+ index_endpoint (str): + Required. The name of the IndexEndpoint resource into + which to deploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_index (google.cloud.aiplatform_v1.types.DeployedIndex): + Required. The DeployedIndex to be updated within the + IndexEndpoint. Currently, the updatable fields are + [DeployedIndex][automatic_resources] and + [DeployedIndex][dedicated_resources] + + This corresponds to the ``deployed_index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1.types.MutateDeployedIndexResponse` + Response message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, deployed_index]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.MutateDeployedIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, index_endpoint_service.MutateDeployedIndexRequest): + request = index_endpoint_service.MutateDeployedIndexRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if deployed_index is not None: + request.deployed_index = deployed_index + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.mutate_deployed_index] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("index_endpoint", request.index_endpoint),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + index_endpoint_service.MutateDeployedIndexResponse, + metadata_type=index_endpoint_service.MutateDeployedIndexOperationMetadata, + ) + + # Done; return the response. 
+ return response + def __enter__(self): return self diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py index 7c7d3e18b5..094ccb33c8 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py @@ -154,6 +154,11 @@ def _prep_wrapped_messages(self, client_info): self.undeploy_index: gapic_v1.method.wrap_method( self.undeploy_index, default_timeout=None, client_info=client_info, ), + self.mutate_deployed_index: gapic_v1.method.wrap_method( + self.mutate_deployed_index, + default_timeout=None, + client_info=client_info, + ), } def close(self): @@ -239,5 +244,14 @@ def undeploy_index( ]: raise NotImplementedError() + @property + def mutate_deployed_index( + self, + ) -> Callable[ + [index_endpoint_service.MutateDeployedIndexRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + __all__ = ("IndexEndpointServiceTransport",) diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py index 250c67f678..340a5d2057 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py @@ -446,6 +446,35 @@ def undeploy_index( ) return self._stubs["undeploy_index"] + @property + def mutate_deployed_index( + self, + ) -> Callable[ + [index_endpoint_service.MutateDeployedIndexRequest], operations_pb2.Operation + ]: + r"""Return a callable for the mutate deployed index method over gRPC. + + Update an existing DeployedIndex under an + IndexEndpoint. 
+ + Returns: + Callable[[~.MutateDeployedIndexRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "mutate_deployed_index" not in self._stubs: + self._stubs["mutate_deployed_index"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.IndexEndpointService/MutateDeployedIndex", + request_serializer=index_endpoint_service.MutateDeployedIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["mutate_deployed_index"] + def close(self): self.grpc_channel.close() diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py index 6afa165a08..928f1e09e0 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py @@ -454,6 +454,36 @@ def undeploy_index( ) return self._stubs["undeploy_index"] + @property + def mutate_deployed_index( + self, + ) -> Callable[ + [index_endpoint_service.MutateDeployedIndexRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the mutate deployed index method over gRPC. + + Update an existing DeployedIndex under an + IndexEndpoint. + + Returns: + Callable[[~.MutateDeployedIndexRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "mutate_deployed_index" not in self._stubs: + self._stubs["mutate_deployed_index"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.IndexEndpointService/MutateDeployedIndex", + request_serializer=index_endpoint_service.MutateDeployedIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["mutate_deployed_index"] + def close(self): return self.grpc_channel.close() diff --git a/google/cloud/aiplatform_v1/services/job_service/async_client.py b/google/cloud/aiplatform_v1/services/job_service/async_client.py index 76c4eff06e..00a77a8788 100644 --- a/google/cloud/aiplatform_v1/services/job_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/job_service/async_client.py @@ -61,6 +61,7 @@ from google.cloud.aiplatform_v1.types import model_monitoring from google.cloud.aiplatform_v1.types import operation as gca_operation from google.cloud.aiplatform_v1.types import study +from google.cloud.aiplatform_v1.types import unmanaged_container_model from google.protobuf import duration_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore from google.protobuf import field_mask_pb2 # type: ignore @@ -1974,7 +1975,7 @@ async def search_model_deployment_monitoring_stats_anomalies( should not be set. deployed_model_id (:class:`str`): Required. The DeployedModel ID of the - [google.cloud.aiplatform.master.ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. + [ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. 
This corresponds to the ``deployed_model_id`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1/services/job_service/client.py b/google/cloud/aiplatform_v1/services/job_service/client.py index 466fe1f100..91e232ef97 100644 --- a/google/cloud/aiplatform_v1/services/job_service/client.py +++ b/google/cloud/aiplatform_v1/services/job_service/client.py @@ -64,6 +64,7 @@ from google.cloud.aiplatform_v1.types import model_monitoring from google.cloud.aiplatform_v1.types import operation as gca_operation from google.cloud.aiplatform_v1.types import study +from google.cloud.aiplatform_v1.types import unmanaged_container_model from google.protobuf import duration_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore from google.protobuf import field_mask_pb2 # type: ignore @@ -2335,7 +2336,7 @@ def search_model_deployment_monitoring_stats_anomalies( should not be set. deployed_model_id (str): Required. The DeployedModel ID of the - [google.cloud.aiplatform.master.ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. + [ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. 
This corresponds to the ``deployed_model_id`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1/services/migration_service/client.py b/google/cloud/aiplatform_v1/services/migration_service/client.py index 5a1e5f2c41..8e509a04a6 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1/services/migration_service/client.py @@ -199,32 +199,32 @@ def parse_dataset_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def dataset_path(project: str, location: str, dataset: str,) -> str: + def dataset_path(project: str, dataset: str,) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( - project=project, location=location, dataset=dataset, + return "projects/{project}/datasets/{dataset}".format( + project=project, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", - path, - ) + m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) return m.groupdict() if m else {} @staticmethod - def dataset_path(project: str, dataset: str,) -> str: + def dataset_path(project: str, location: str, dataset: str,) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format( - project=project, dataset=dataset, + return "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", + path, + ) return m.groupdict() if m else {} 
@staticmethod diff --git a/google/cloud/aiplatform_v1/services/model_service/async_client.py b/google/cloud/aiplatform_v1/services/model_service/async_client.py index 4f78912b27..8c3caae9b2 100644 --- a/google/cloud/aiplatform_v1/services/model_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/model_service/async_client.py @@ -528,8 +528,12 @@ async def delete_model( ) -> operation_async.AsyncOperation: r"""Deletes a Model. - Model can only be deleted if there are no [DeployedModels][] - created from it. + A model cannot be deleted if any + [Endpoint][google.cloud.aiplatform.v1.Endpoint] resource has a + [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] based + on the model in its + [deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] + field. Args: request (Union[google.cloud.aiplatform_v1.types.DeleteModelRequest, dict]): @@ -623,7 +627,7 @@ async def export_model( timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: - r"""Exports a trained, exportable, Model to a location specified by + r"""Exports a trained, exportable Model to a location specified by the user. A Model is considered to be exportable if it has at least one [supported export format][google.cloud.aiplatform.v1.Model.supported_export_formats]. diff --git a/google/cloud/aiplatform_v1/services/model_service/client.py b/google/cloud/aiplatform_v1/services/model_service/client.py index 94eab48f1e..60bdad30c5 100644 --- a/google/cloud/aiplatform_v1/services/model_service/client.py +++ b/google/cloud/aiplatform_v1/services/model_service/client.py @@ -780,8 +780,12 @@ def delete_model( ) -> gac_operation.Operation: r"""Deletes a Model. - Model can only be deleted if there are no [DeployedModels][] - created from it. 
+ A model cannot be deleted if any + [Endpoint][google.cloud.aiplatform.v1.Endpoint] resource has a + [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] based + on the model in its + [deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] + field. Args: request (Union[google.cloud.aiplatform_v1.types.DeleteModelRequest, dict]): @@ -875,7 +879,7 @@ def export_model( timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gac_operation.Operation: - r"""Exports a trained, exportable, Model to a location specified by + r"""Exports a trained, exportable Model to a location specified by the user. A Model is considered to be exportable if it has at least one [supported export format][google.cloud.aiplatform.v1.Model.supported_export_formats]. diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py index 66abe5707c..91751eef77 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py @@ -356,8 +356,12 @@ def delete_model( Deletes a Model. - Model can only be deleted if there are no [DeployedModels][] - created from it. + A model cannot be deleted if any + [Endpoint][google.cloud.aiplatform.v1.Endpoint] resource has a + [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] based + on the model in its + [deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] + field. Returns: Callable[[~.DeleteModelRequest], @@ -383,7 +387,7 @@ def export_model( ) -> Callable[[model_service.ExportModelRequest], operations_pb2.Operation]: r"""Return a callable for the export model method over gRPC. - Exports a trained, exportable, Model to a location specified by + Exports a trained, exportable Model to a location specified by the user. 
A Model is considered to be exportable if it has at least one [supported export format][google.cloud.aiplatform.v1.Model.supported_export_formats]. diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py index 40786098bf..71ae4b287e 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py @@ -368,8 +368,12 @@ def delete_model( Deletes a Model. - Model can only be deleted if there are no [DeployedModels][] - created from it. + A model cannot be deleted if any + [Endpoint][google.cloud.aiplatform.v1.Endpoint] resource has a + [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] based + on the model in its + [deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] + field. Returns: Callable[[~.DeleteModelRequest], @@ -397,7 +401,7 @@ def export_model( ]: r"""Return a callable for the export model method over gRPC. - Exports a trained, exportable, Model to a location specified by + Exports a trained, exportable Model to a location specified by the user. A Model is considered to be exportable if it has at least one [supported export format][google.cloud.aiplatform.v1.Model.supported_export_formats]. diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py index c74528fc35..c13faa5438 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py @@ -55,7 +55,7 @@ class PipelineServiceAsyncClient: """A service for creating and managing Vertex AI's pipelines. 
This includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex + custom training) and ``PipelineJob`` resources (used for Vertex AI Pipelines). """ diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/client.py b/google/cloud/aiplatform_v1/services/pipeline_service/client.py index bb6811edef..ee9754086e 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/client.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/client.py @@ -91,7 +91,7 @@ def get_transport_class(cls, label: str = None,) -> Type[PipelineServiceTranspor class PipelineServiceClient(metaclass=PipelineServiceClientMeta): """A service for creating and managing Vertex AI's pipelines. This includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex + custom training) and ``PipelineJob`` resources (used for Vertex AI Pipelines). """ diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py index fd6b0e8505..87d0dafdb1 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py @@ -40,7 +40,7 @@ class PipelineServiceGrpcTransport(PipelineServiceTransport): A service for creating and managing Vertex AI's pipelines. This includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex + custom training) and ``PipelineJob`` resources (used for Vertex AI Pipelines). 
This class defines the same methods as the primary client, so the diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py index cb13e43d0f..624014b258 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py @@ -41,7 +41,7 @@ class PipelineServiceGrpcAsyncIOTransport(PipelineServiceTransport): A service for creating and managing Vertex AI's pipelines. This includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex + custom training) and ``PipelineJob`` resources (used for Vertex AI Pipelines). This class defines the same methods as the primary client, so the diff --git a/google/cloud/aiplatform_v1/services/vizier_service/async_client.py b/google/cloud/aiplatform_v1/services/vizier_service/async_client.py index 60ed69480b..ced1379087 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/async_client.py @@ -44,8 +44,8 @@ class VizierServiceAsyncClient: - """Vertex Vizier API. - Vizier service is a GCP service to solve blackbox optimization + """Vertex AI Vizier API. + Vertex AI Vizier is a service to solve blackbox optimization problems, such as tuning machine learning hyperparameters and searching over deep learning architectures. """ @@ -219,7 +219,9 @@ async def create_study( Returns: google.cloud.aiplatform_v1.types.Study: + LINT.IfChange A message representing a Study. + """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have @@ -290,7 +292,9 @@ async def get_study( Returns: google.cloud.aiplatform_v1.types.Study: + LINT.IfChange A message representing a Study. + """ # Create or coerce a protobuf request object. 
# Sanity check: If we got a request object, we should *not* have @@ -508,7 +512,9 @@ async def lookup_study( Returns: google.cloud.aiplatform_v1.types.Study: + LINT.IfChange A message representing a Study. + """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have @@ -556,7 +562,7 @@ async def suggest_trials( metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Adds one or more Trials to a Study, with parameter values - suggested by Vertex Vizier. Returns a long-running operation + suggested by Vertex AI Vizier. Returns a long-running operation associated with the generation of Trial suggestions. When this long-running operation succeeds, it will contain a [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. diff --git a/google/cloud/aiplatform_v1/services/vizier_service/client.py b/google/cloud/aiplatform_v1/services/vizier_service/client.py index 8854c4f1e3..66a9e42510 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/client.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/client.py @@ -78,8 +78,8 @@ def get_transport_class(cls, label: str = None,) -> Type[VizierServiceTransport] class VizierServiceClient(metaclass=VizierServiceClientMeta): - """Vertex Vizier API. - Vizier service is a GCP service to solve blackbox optimization + """Vertex AI Vizier API. + Vertex AI Vizier is a service to solve blackbox optimization problems, such as tuning machine learning hyperparameters and searching over deep learning architectures. """ @@ -437,7 +437,9 @@ def create_study( Returns: google.cloud.aiplatform_v1.types.Study: + LINT.IfChange A message representing a Study. + """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have @@ -508,7 +510,9 @@ def get_study( Returns: google.cloud.aiplatform_v1.types.Study: + LINT.IfChange A message representing a Study. 
+ """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have @@ -726,7 +730,9 @@ def lookup_study( Returns: google.cloud.aiplatform_v1.types.Study: + LINT.IfChange A message representing a Study. + """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have @@ -774,7 +780,7 @@ def suggest_trials( metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Adds one or more Trials to a Study, with parameter values - suggested by Vertex Vizier. Returns a long-running operation + suggested by Vertex AI Vizier. Returns a long-running operation associated with the generation of Trial suggestions. When this long-running operation succeeds, it will contain a [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. diff --git a/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc.py index 43fcb9edf4..9a3aab47d2 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc.py @@ -36,8 +36,8 @@ class VizierServiceGrpcTransport(VizierServiceTransport): """gRPC backend transport for VizierService. - Vertex Vizier API. - Vizier service is a GCP service to solve blackbox optimization + Vertex AI Vizier API. + Vertex AI Vizier is a service to solve blackbox optimization problems, such as tuning machine learning hyperparameters and searching over deep learning architectures. @@ -388,7 +388,7 @@ def suggest_trials( r"""Return a callable for the suggest trials method over gRPC. Adds one or more Trials to a Study, with parameter values - suggested by Vertex Vizier. Returns a long-running operation + suggested by Vertex AI Vizier. Returns a long-running operation associated with the generation of Trial suggestions. 
When this long-running operation succeeds, it will contain a [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. diff --git a/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc_asyncio.py index 50dccf35aa..d9506d8902 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc_asyncio.py @@ -37,8 +37,8 @@ class VizierServiceGrpcAsyncIOTransport(VizierServiceTransport): """gRPC AsyncIO backend transport for VizierService. - Vertex Vizier API. - Vizier service is a GCP service to solve blackbox optimization + Vertex AI Vizier API. + Vertex AI Vizier is a service to solve blackbox optimization problems, such as tuning machine learning hyperparameters and searching over deep learning architectures. @@ -397,7 +397,7 @@ def suggest_trials( r"""Return a callable for the suggest trials method over gRPC. Adds one or more Trials to a Study, with parameter values - suggested by Vertex Vizier. Returns a long-running operation + suggested by Vertex AI Vizier. Returns a long-running operation associated with the generation of Trial suggestions. When this long-running operation succeeds, it will contain a [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. 
diff --git a/google/cloud/aiplatform_v1/types/__init__.py b/google/cloud/aiplatform_v1/types/__init__.py index 443b665926..7ba335642c 100644 --- a/google/cloud/aiplatform_v1/types/__init__.py +++ b/google/cloud/aiplatform_v1/types/__init__.py @@ -88,6 +88,7 @@ from .execution import Execution from .explanation import ( Attribution, + BlurBaselineConfig, Explanation, ExplanationMetadataOverride, ExplanationParameters, @@ -173,6 +174,9 @@ GetIndexEndpointRequest, ListIndexEndpointsRequest, ListIndexEndpointsResponse, + MutateDeployedIndexOperationMetadata, + MutateDeployedIndexRequest, + MutateDeployedIndexResponse, UndeployIndexOperationMetadata, UndeployIndexRequest, UndeployIndexResponse, @@ -459,6 +463,7 @@ FractionSplit, InputDataConfig, PredefinedSplit, + StratifiedSplit, TimestampSplit, TrainingPipeline, ) @@ -468,6 +473,7 @@ Int64Array, StringArray, ) +from .unmanaged_container_model import UnmanagedContainerModel from .user_action_reference import UserActionReference from .value import Value from .vizier_service import ( @@ -559,6 +565,7 @@ "Event", "Execution", "Attribution", + "BlurBaselineConfig", "Explanation", "ExplanationMetadataOverride", "ExplanationParameters", @@ -634,6 +641,9 @@ "GetIndexEndpointRequest", "ListIndexEndpointsRequest", "ListIndexEndpointsResponse", + "MutateDeployedIndexOperationMetadata", + "MutateDeployedIndexRequest", + "MutateDeployedIndexResponse", "UndeployIndexOperationMetadata", "UndeployIndexRequest", "UndeployIndexResponse", @@ -884,12 +894,14 @@ "FractionSplit", "InputDataConfig", "PredefinedSplit", + "StratifiedSplit", "TimestampSplit", "TrainingPipeline", "BoolArray", "DoubleArray", "Int64Array", "StringArray", + "UnmanagedContainerModel", "UserActionReference", "Value", "AddTrialMeasurementRequest", diff --git a/google/cloud/aiplatform_v1/types/artifact.py b/google/cloud/aiplatform_v1/types/artifact.py index aed8db7885..6042601790 100644 --- a/google/cloud/aiplatform_v1/types/artifact.py +++ 
b/google/cloud/aiplatform_v1/types/artifact.py @@ -62,9 +62,9 @@ class Artifact(proto.Message): The state of this Artifact. This is a property of the Artifact, and does not imply or capture any ongoing process. This property is - managed by clients (such as Vertex Pipelines), - and the system does not prescribe or check the - validity of state transitions. + managed by clients (such as Vertex AI + Pipelines), and the system does not prescribe or + check the validity of state transitions. schema_title (str): The title of the schema describing the metadata. diff --git a/google/cloud/aiplatform_v1/types/batch_prediction_job.py b/google/cloud/aiplatform_v1/types/batch_prediction_job.py index 448cd63720..350951bc17 100644 --- a/google/cloud/aiplatform_v1/types/batch_prediction_job.py +++ b/google/cloud/aiplatform_v1/types/batch_prediction_job.py @@ -24,6 +24,9 @@ from google.cloud.aiplatform_v1.types import ( manual_batch_tuning_parameters as gca_manual_batch_tuning_parameters, ) +from google.cloud.aiplatform_v1.types import ( + unmanaged_container_model as gca_unmanaged_container_model, +) from google.protobuf import struct_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore from google.rpc import status_pb2 # type: ignore @@ -51,11 +54,16 @@ class BatchPredictionJob(proto.Message): Required. The user-defined name of this BatchPredictionJob. model (str): - Required. The name of the Model that produces - the predictions via this job, must share the - same ancestor Location. Starting this job has no - impact on any existing deployments of the Model - and their resources. + The name of the Model resoure that produces the predictions + via this job, must share the same ancestor Location. + Starting this job has no impact on any existing deployments + of the Model and their resources. Exactly one of model and + unmanaged_container_model must be set. 
+ unmanaged_container_model (google.cloud.aiplatform_v1.types.UnmanagedContainerModel): + Contains model information necessary to perform batch + prediction without requiring uploading to model registry. + Exactly one of model and unmanaged_container_model must be + set. input_config (google.cloud.aiplatform_v1.types.BatchPredictionJob.InputConfig): Required. Input configuration of the instances on which predictions are performed. The schema of any single instance @@ -359,6 +367,11 @@ class OutputInfo(proto.Message): name = proto.Field(proto.STRING, number=1,) display_name = proto.Field(proto.STRING, number=2,) model = proto.Field(proto.STRING, number=3,) + unmanaged_container_model = proto.Field( + proto.MESSAGE, + number=28, + message=gca_unmanaged_container_model.UnmanagedContainerModel, + ) input_config = proto.Field(proto.MESSAGE, number=4, message=InputConfig,) model_parameters = proto.Field(proto.MESSAGE, number=5, message=struct_pb2.Value,) output_config = proto.Field(proto.MESSAGE, number=6, message=OutputConfig,) diff --git a/google/cloud/aiplatform_v1/types/custom_job.py b/google/cloud/aiplatform_v1/types/custom_job.py index 4f7849d3d5..846de2f622 100644 --- a/google/cloud/aiplatform_v1/types/custom_job.py +++ b/google/cloud/aiplatform_v1/types/custom_job.py @@ -148,9 +148,12 @@ class CustomJobSpec(proto.Message): {project} is a project number, as in ``12345``, and {network} is a network name. - Private services access must already be configured for the - network. If left unspecified, the job is not peered with any - network. + To specify this field, you must have already `configured VPC + Network Peering for Vertex + AI `__. + + If this field is left unspecified, the job is not peered + with any network. base_output_directory (google.cloud.aiplatform_v1.types.GcsDestination): The Cloud Storage location to store the output of this CustomJob or HyperparameterTuningJob. 
For diff --git a/google/cloud/aiplatform_v1/types/endpoint.py b/google/cloud/aiplatform_v1/types/endpoint.py index ef8fc98a22..8ea223fc73 100644 --- a/google/cloud/aiplatform_v1/types/endpoint.py +++ b/google/cloud/aiplatform_v1/types/endpoint.py @@ -92,10 +92,22 @@ class Endpoint(proto.Message): network. If left unspecified, the Endpoint is not peered with any network. + Only one of the fields, + [network][google.cloud.aiplatform.v1.Endpoint.network] or + [enable_private_service_connect][google.cloud.aiplatform.v1.Endpoint.enable_private_service_connect], + can be set. + `Format `__: ``projects/{project}/global/networks/{network}``. Where ``{project}`` is a project number, as in ``12345``, and ``{network}`` is network name. + enable_private_service_connect (bool): + If true, expose the Endpoint via private service connect. + + Only one of the fields, + [network][google.cloud.aiplatform.v1.Endpoint.network] or + [enable_private_service_connect][google.cloud.aiplatform.v1.Endpoint.enable_private_service_connect], + can be set. model_deployment_monitoring_job (str): Output only. Resource name of the Model Monitoring job associated with this Endpoint if monitoring is enabled by @@ -118,6 +130,7 @@ class Endpoint(proto.Message): proto.MESSAGE, number=10, message=gca_encryption_spec.EncryptionSpec, ) network = proto.Field(proto.STRING, number=13,) + enable_private_service_connect = proto.Field(proto.BOOL, number=17,) model_deployment_monitoring_job = proto.Field(proto.STRING, number=14,) @@ -146,7 +159,11 @@ class DeployedModel(proto.Message): This field is a member of `oneof`_ ``prediction_resources``. id (str): - Output only. The ID of the DeployedModel. + Immutable. The ID of the DeployedModel. If not provided upon + deployment, Vertex AI will generate a value for this ID. + + This value should be 1-10 characters, and valid characters + are /[0-9]/. model (str): Required. The name of the Model that this is the deployment of. 
Note that the Model may be in @@ -242,8 +259,10 @@ class DeployedModel(proto.Message): class PrivateEndpoints(proto.Message): - r"""PrivateEndpoints is used to provide paths for users to send - requests via private services access. + r"""PrivateEndpoints proto is used to provide paths for users to send + requests privately. To send request via private service access, use + predict_http_uri, explain_http_uri or health_http_uri. To send + request via private service connect, use service_attachment. Attributes: predict_http_uri (str): @@ -255,11 +274,16 @@ class PrivateEndpoints(proto.Message): health_http_uri (str): Output only. Http(s) path to send health check requests. + service_attachment (str): + Output only. The name of the service + attachment resource. Populated if private + service connect is enabled. """ predict_http_uri = proto.Field(proto.STRING, number=1,) explain_http_uri = proto.Field(proto.STRING, number=2,) health_http_uri = proto.Field(proto.STRING, number=3,) + service_attachment = proto.Field(proto.STRING, number=4,) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/endpoint_service.py b/google/cloud/aiplatform_v1/types/endpoint_service.py index 19e463a721..d7bf783b4a 100644 --- a/google/cloud/aiplatform_v1/types/endpoint_service.py +++ b/google/cloud/aiplatform_v1/types/endpoint_service.py @@ -51,10 +51,21 @@ class CreateEndpointRequest(proto.Message): ``projects/{project}/locations/{location}`` endpoint (google.cloud.aiplatform_v1.types.Endpoint): Required. The Endpoint to create. + endpoint_id (str): + Immutable. The ID to use for endpoint, which will become the + final component of the endpoint resource name. If not + provided, Vertex AI will generate a value for this ID. + + This value should be 1-10 characters, and valid characters + are /[0-9]/. When using HTTP/JSON, this field is populated + based on a query string argument, such as + ``?endpoint_id=12345``. 
This is the fallback for fields that + are not included in either the URI or the body. """ parent = proto.Field(proto.STRING, number=1,) endpoint = proto.Field(proto.MESSAGE, number=2, message=gca_endpoint.Endpoint,) + endpoint_id = proto.Field(proto.STRING, number=4,) class CreateEndpointOperationMetadata(proto.Message): diff --git a/google/cloud/aiplatform_v1/types/execution.py b/google/cloud/aiplatform_v1/types/execution.py index 2041d131fa..52acfc61aa 100644 --- a/google/cloud/aiplatform_v1/types/execution.py +++ b/google/cloud/aiplatform_v1/types/execution.py @@ -38,7 +38,7 @@ class Execution(proto.Message): The state of this Execution. This is a property of the Execution, and does not imply or capture any ongoing process. This property is - managed by clients (such as Vertex Pipelines) + managed by clients (such as Vertex AI Pipelines) and the system does not prescribe or check the validity of state transitions. etag (str): diff --git a/google/cloud/aiplatform_v1/types/explanation.py b/google/cloud/aiplatform_v1/types/explanation.py index 9023b7f688..33a688fbe0 100644 --- a/google/cloud/aiplatform_v1/types/explanation.py +++ b/google/cloud/aiplatform_v1/types/explanation.py @@ -32,6 +32,7 @@ "XraiAttribution", "SmoothGradConfig", "FeatureNoiseSigma", + "BlurBaselineConfig", "ExplanationSpecOverride", "ExplanationMetadataOverride", }, @@ -379,12 +380,22 @@ class IntegratedGradientsAttribution(proto.Message): help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf + blur_baseline_config (google.cloud.aiplatform_v1.types.BlurBaselineConfig): + Config for IG with blur baseline. + When enabled, a linear path from the maximally + blurred image to the input image is created. 
+ Using a blurred baseline instead of zero (black + image) is motivated by the BlurIG approach + explained here: https://arxiv.org/abs/2004.03383 """ step_count = proto.Field(proto.INT32, number=1,) smooth_grad_config = proto.Field( proto.MESSAGE, number=2, message="SmoothGradConfig", ) + blur_baseline_config = proto.Field( + proto.MESSAGE, number=3, message="BlurBaselineConfig", + ) class XraiAttribution(proto.Message): @@ -412,12 +423,22 @@ class XraiAttribution(proto.Message): help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf + blur_baseline_config (google.cloud.aiplatform_v1.types.BlurBaselineConfig): + Config for XRAI with blur baseline. + When enabled, a linear path from the maximally + blurred image to the input image is created. + Using a blurred baseline instead of zero (black + image) is motivated by the BlurIG approach + explained here: https://arxiv.org/abs/2004.03383 """ step_count = proto.Field(proto.INT32, number=1,) smooth_grad_config = proto.Field( proto.MESSAGE, number=2, message="SmoothGradConfig", ) + blur_baseline_config = proto.Field( + proto.MESSAGE, number=3, message="BlurBaselineConfig", + ) class SmoothGradConfig(proto.Message): @@ -518,6 +539,26 @@ class NoiseSigmaForFeature(proto.Message): ) +class BlurBaselineConfig(proto.Message): + r"""Config for blur baseline. + When enabled, a linear path from the maximally blurred image to + the input image is created. Using a blurred baseline instead of + zero (black image) is motivated by the BlurIG approach explained + here: + https://arxiv.org/abs/2004.03383 + + Attributes: + max_blur_sigma (float): + The standard deviation of the blur kernel for + the blurred baseline. The same blurring + parameter is used for both the height and the + width dimension. If not set, the method defaults + to the zero (i.e. black for images) baseline. 
+ """ + + max_blur_sigma = proto.Field(proto.FLOAT, number=1,) + + class ExplanationSpecOverride(proto.Message): r"""The [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] entries that can be overridden at [online diff --git a/google/cloud/aiplatform_v1/types/featurestore.py b/google/cloud/aiplatform_v1/types/featurestore.py index 2377fe86a1..0f706dcffc 100644 --- a/google/cloud/aiplatform_v1/types/featurestore.py +++ b/google/cloud/aiplatform_v1/types/featurestore.py @@ -25,7 +25,7 @@ class Featurestore(proto.Message): - r"""Vertex Feature Store provides a centralized repository for + r"""Vertex AI Feature Store provides a centralized repository for organizing, storing, and serving ML features. The Featurestore is a top-level container for your features and their values. diff --git a/google/cloud/aiplatform_v1/types/featurestore_online_service.py b/google/cloud/aiplatform_v1/types/featurestore_online_service.py index 9780b90719..d62257c4ee 100644 --- a/google/cloud/aiplatform_v1/types/featurestore_online_service.py +++ b/google/cloud/aiplatform_v1/types/featurestore_online_service.py @@ -256,7 +256,10 @@ class Metadata(proto.Message): is provided by user at feature ingestion time. If not, feature store will use the system timestamp when the data is ingested into feature - store. + store. For streaming ingestion, the time, + aligned by days, must be no older than five + years (1825 days) and no later than one year + (366 days) in the future. 
""" generate_time = proto.Field( diff --git a/google/cloud/aiplatform_v1/types/featurestore_service.py b/google/cloud/aiplatform_v1/types/featurestore_service.py index a43870efaf..87a76d5c82 100644 --- a/google/cloud/aiplatform_v1/types/featurestore_service.py +++ b/google/cloud/aiplatform_v1/types/featurestore_service.py @@ -507,13 +507,22 @@ class ExportFeatureValuesRequest(proto.Message): r"""Request message for [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ExportFeatureValues]. + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: snapshot_export (google.cloud.aiplatform_v1.types.ExportFeatureValuesRequest.SnapshotExport): - Exports Feature values of all entities of the - EntityType as of a snapshot time. + Exports the latest Feature values of all + entities of the EntityType within a time range. + + This field is a member of `oneof`_ ``mode``. + full_export (google.cloud.aiplatform_v1.types.ExportFeatureValuesRequest.FullExport): + Exports all historical values of all entities + of the EntityType within a time range This field is a member of `oneof`_ ``mode``. entity_type (str): @@ -531,8 +540,8 @@ class ExportFeatureValuesRequest(proto.Message): """ class SnapshotExport(proto.Message): - r"""Describes exporting Feature values as of the snapshot - timestamp. + r"""Describes exporting the latest Feature values of all entities of the + EntityType between [start_time, snapshot_time]. Attributes: snapshot_time (google.protobuf.timestamp_pb2.Timestamp): @@ -540,15 +549,52 @@ class SnapshotExport(proto.Message): If not set, retrieve values as of now. Timestamp, if present, must not have higher than millisecond precision. 
+ start_time (google.protobuf.timestamp_pb2.Timestamp): + Excludes Feature values with feature + generation timestamp before this timestamp. If + not set, retrieve oldest values kept in Feature + Store. Timestamp, if present, must not have + higher than millisecond precision. """ snapshot_time = proto.Field( proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp, ) + start_time = proto.Field( + proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, + ) + + class FullExport(proto.Message): + r"""Describes exporting all historical Feature values of all entities of + the EntityType between [start_time, end_time]. + + Attributes: + start_time (google.protobuf.timestamp_pb2.Timestamp): + Excludes Feature values with feature + generation timestamp before this timestamp. If + not set, retrieve oldest values kept in Feature + Store. Timestamp, if present, must not have + higher than millisecond precision. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Exports Feature values as of this timestamp. + If not set, retrieve values as of now. + Timestamp, if present, must not have higher than + millisecond precision. + """ + + start_time = proto.Field( + proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp, + ) snapshot_export = proto.Field( proto.MESSAGE, number=3, oneof="mode", message=SnapshotExport, ) + full_export = proto.Field( + proto.MESSAGE, number=7, oneof="mode", message=FullExport, + ) entity_type = proto.Field(proto.STRING, number=1,) destination = proto.Field( proto.MESSAGE, number=4, message="FeatureValueDestination", @@ -1214,17 +1260,17 @@ class UpdateFeaturestoreOperationMetadata(proto.Message): class ImportFeatureValuesOperationMetadata(proto.Message): - r"""Details of operations that perform import feature values. + r"""Details of operations that perform import Feature values. 
Attributes: generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): Operation metadata for Featurestore import - feature values. + Feature values. imported_entity_count (int): Number of entities that have been imported by the operation. imported_feature_value_count (int): - Number of feature values that have been + Number of Feature values that have been imported by the operation. invalid_row_count (int): The number of rows in input source that weren't imported due diff --git a/google/cloud/aiplatform_v1/types/index_endpoint.py b/google/cloud/aiplatform_v1/types/index_endpoint.py index 0371beba3a..6f2edb034e 100644 --- a/google/cloud/aiplatform_v1/types/index_endpoint.py +++ b/google/cloud/aiplatform_v1/types/index_endpoint.py @@ -73,8 +73,7 @@ class IndexEndpoint(proto.Message): of the original Indexes they are the deployments of. network (str): - Required. Immutable. The full name of the Google Compute - Engine + Optional. The full name of the Google Compute Engine `network `__ to which the IndexEndpoint should be peered. @@ -82,10 +81,25 @@ class IndexEndpoint(proto.Message): network. If left unspecified, the Endpoint is not peered with any network. + Only one of the fields, + [network][google.cloud.aiplatform.v1.IndexEndpoint.network] + or + [enable_private_service_connect][google.cloud.aiplatform.v1.IndexEndpoint.enable_private_service_connect], + can be set. + `Format `__: projects/{project}/global/networks/{network}. Where {project} is a project number, as in '12345', and {network} is network name. + enable_private_service_connect (bool): + Optional. If true, expose the IndexEndpoint via private + service connect. + + Only one of the fields, + [network][google.cloud.aiplatform.v1.IndexEndpoint.network] + or + [enable_private_service_connect][google.cloud.aiplatform.v1.IndexEndpoint.enable_private_service_connect], + can be set. 
""" name = proto.Field(proto.STRING, number=1,) @@ -99,6 +113,7 @@ class IndexEndpoint(proto.Message): create_time = proto.Field(proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,) update_time = proto.Field(proto.MESSAGE, number=8, message=timestamp_pb2.Timestamp,) network = proto.Field(proto.STRING, number=9,) + enable_private_service_connect = proto.Field(proto.BOOL, number=10,) class DeployedIndex(proto.Message): @@ -255,16 +270,24 @@ class AuthProvider(proto.Message): class IndexPrivateEndpoints(proto.Message): - r"""IndexPrivateEndpoints proto is used to provide paths for - users to send requests via private services access. + r"""IndexPrivateEndpoints proto is used to provide paths for users to + send requests via private endpoints (e.g. private service access, + private service connect). To send request via private service + access, use match_grpc_address. To send request via private service + connect, use service_attachment. Attributes: match_grpc_address (str): Output only. The ip address used to send match gRPC requests. + service_attachment (str): + Output only. The name of the service + attachment resource. Populated if private + service connect is enabled. 
""" match_grpc_address = proto.Field(proto.STRING, number=1,) + service_attachment = proto.Field(proto.STRING, number=2,) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/index_endpoint_service.py b/google/cloud/aiplatform_v1/types/index_endpoint_service.py index 2dfd523bc0..f3d57b0f19 100644 --- a/google/cloud/aiplatform_v1/types/index_endpoint_service.py +++ b/google/cloud/aiplatform_v1/types/index_endpoint_service.py @@ -36,6 +36,9 @@ "UndeployIndexRequest", "UndeployIndexResponse", "UndeployIndexOperationMetadata", + "MutateDeployedIndexRequest", + "MutateDeployedIndexResponse", + "MutateDeployedIndexOperationMetadata", }, ) @@ -289,4 +292,58 @@ class UndeployIndexOperationMetadata(proto.Message): ) +class MutateDeployedIndexRequest(proto.Message): + r"""Request message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex]. + + Attributes: + index_endpoint (str): + Required. The name of the IndexEndpoint resource into which + to deploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + deployed_index (google.cloud.aiplatform_v1.types.DeployedIndex): + Required. The DeployedIndex to be updated within the + IndexEndpoint. Currently, the updatable fields are + [DeployedIndex][automatic_resources] and + [DeployedIndex][dedicated_resources] + """ + + index_endpoint = proto.Field(proto.STRING, number=1,) + deployed_index = proto.Field( + proto.MESSAGE, number=2, message=gca_index_endpoint.DeployedIndex, + ) + + +class MutateDeployedIndexResponse(proto.Message): + r"""Response message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex]. + + Attributes: + deployed_index (google.cloud.aiplatform_v1.types.DeployedIndex): + The DeployedIndex that had been updated in + the IndexEndpoint. 
+ """ + + deployed_index = proto.Field( + proto.MESSAGE, number=1, message=gca_index_endpoint.DeployedIndex, + ) + + +class MutateDeployedIndexOperationMetadata(proto.Message): + r"""Runtime operation information for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The operation generic information. + deployed_index_id (str): + The unique index id specified by user + """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + deployed_index_id = proto.Field(proto.STRING, number=2,) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/job_service.py b/google/cloud/aiplatform_v1/types/job_service.py index f61d307fe3..67d1df1469 100644 --- a/google/cloud/aiplatform_v1/types/job_service.py +++ b/google/cloud/aiplatform_v1/types/job_service.py @@ -631,7 +631,7 @@ class SearchModelDeploymentMonitoringStatsAnomaliesRequest(proto.Message): \`projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job} deployed_model_id (str): Required. The DeployedModel ID of the - [google.cloud.aiplatform.master.ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. + [ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. feature_display_name (str): The feature display name. If specified, only return the stats belonging to this feature. 
Format: diff --git a/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py b/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py index 3bbf2db2a3..21aba235b2 100644 --- a/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py +++ b/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py @@ -162,9 +162,10 @@ class ModelDeploymentMonitoringJob(proto.Message): resources of this ModelDeploymentMonitoringJob will be secured by this key. enable_monitoring_pipeline_logs (bool): - If true, the scheduled monitoring pipeline status logs are - sent to Google Cloud Logging. Please note the logs incur - cost, which are subject to `Cloud Logging + If true, the scheduled monitoring pipeline logs are sent to + Google Cloud Logging, including pipeline status and + anomalies detected. Please note the logs incur cost, which + are subject to `Cloud Logging pricing `__. error (google.rpc.status_pb2.Status): Output only. Only populated when the job's state is diff --git a/google/cloud/aiplatform_v1/types/model_monitoring.py b/google/cloud/aiplatform_v1/types/model_monitoring.py index f396849bb7..9f81bedba0 100644 --- a/google/cloud/aiplatform_v1/types/model_monitoring.py +++ b/google/cloud/aiplatform_v1/types/model_monitoring.py @@ -44,8 +44,8 @@ class ModelMonitoringObjectiveConfig(proto.Message): prediction_drift_detection_config (google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig): The config for drift of prediction data. explanation_config (google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.ExplanationConfig): - The config for integrated with Explainable - AI. + The config for integrating with Vertex + Explainable AI. """ class TrainingDataset(proto.Message): @@ -160,14 +160,14 @@ class PredictionDriftDetectionConfig(proto.Message): ) class ExplanationConfig(proto.Message): - r"""The config for integrated with Explainable AI. 
Only applicable if - the Model has explanation_spec populated. + r"""The config for integrating with Vertex Explainable AI. Only + applicable if the Model has explanation_spec populated. Attributes: enable_feature_attributes (bool): - If want to analyze the Explainable AI feature - attribute scores or not. If set to true, Vertex - AI will log the feature attributions from + If want to analyze the Vertex Explainable AI + feature attribute scores or not. If set to true, + Vertex AI will log the feature attributions from explain response and do the skew/drift detection for them. explanation_baseline (google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.ExplanationConfig.ExplanationBaseline): @@ -246,7 +246,7 @@ class PredictionFormat(proto.Enum): class ModelMonitoringAlertConfig(proto.Message): - r"""Next ID: 2 + r"""Next ID: 3 .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -255,6 +255,12 @@ class ModelMonitoringAlertConfig(proto.Message): Email alert config. This field is a member of `oneof`_ ``alert``. + enable_logging (bool): + Dump the anomalies to Cloud Logging. The anomalies will be + put to json payload encoded from proto + [google.cloud.aiplatform.logging.ModelMonitoringAnomaliesLogEntry][]. + This can be further sinked to Pub/Sub or any other services + supported by Cloud Logging. 
""" class EmailAlertConfig(proto.Message): @@ -270,6 +276,7 @@ class EmailAlertConfig(proto.Message): email_alert_config = proto.Field( proto.MESSAGE, number=1, oneof="alert", message=EmailAlertConfig, ) + enable_logging = proto.Field(proto.BOOL, number=2,) class ThresholdConfig(proto.Message): diff --git a/google/cloud/aiplatform_v1/types/pipeline_job.py b/google/cloud/aiplatform_v1/types/pipeline_job.py index b6cf7ff2b5..9c31981b51 100644 --- a/google/cloud/aiplatform_v1/types/pipeline_job.py +++ b/google/cloud/aiplatform_v1/types/pipeline_job.py @@ -116,11 +116,16 @@ class RuntimeConfig(proto.Message): Attributes: parameters (Sequence[google.cloud.aiplatform_v1.types.PipelineJob.RuntimeConfig.ParametersEntry]): - Deprecated. Use [RuntimeConfig.parameter_values] instead. - The runtime parameters of the PipelineJob. The parameters - will be passed into + Deprecated. Use + [RuntimeConfig.parameter_values][google.cloud.aiplatform.v1.PipelineJob.RuntimeConfig.parameter_values] + instead. The runtime parameters of the PipelineJob. The + parameters will be passed into [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1.PipelineJob.pipeline_spec] - to replace the placeholders at runtime. + to replace the placeholders at runtime. This field is used + by pipelines built using + ``PipelineJob.pipeline_spec.schema_version`` 2.0.0 or lower, + such as pipelines built using Kubeflow Pipelines SDK 1.8 or + lower. gcs_output_directory (str): Required. A path in a Cloud Storage bucket, which will be treated as the root output directory of the pipeline. It is @@ -134,7 +139,11 @@ class RuntimeConfig(proto.Message): The runtime parameters of the PipelineJob. The parameters will be passed into [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1.PipelineJob.pipeline_spec] - to replace the placeholders at runtime. + to replace the placeholders at runtime. 
This field is used + by pipelines built using + ``PipelineJob.pipeline_spec.schema_version`` 2.1.0, such as + pipelines built using Kubeflow Pipelines SDK 1.9 or higher + and the v2 DSL. """ parameters = proto.MapField( diff --git a/google/cloud/aiplatform_v1/types/pipeline_service.py b/google/cloud/aiplatform_v1/types/pipeline_service.py index 28d1309a10..0c54e44a0b 100644 --- a/google/cloud/aiplatform_v1/types/pipeline_service.py +++ b/google/cloud/aiplatform_v1/types/pipeline_service.py @@ -227,6 +227,7 @@ class ListPipelineJobsRequest(proto.Message): comparisons, and ``:`` wildcard. for example, can check if pipeline's display_name contains *step* by doing display_name:"*step*" + - ``state``: Supports ``=`` and ``!=`` comparisons. - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 format. diff --git a/google/cloud/aiplatform_v1/types/study.py b/google/cloud/aiplatform_v1/types/study.py index 97baf53cda..e8d807a2dd 100644 --- a/google/cloud/aiplatform_v1/types/study.py +++ b/google/cloud/aiplatform_v1/types/study.py @@ -27,7 +27,8 @@ class Study(proto.Message): - r"""A message representing a Study. + r"""LINT.IfChange + A message representing a Study. Attributes: name (str): @@ -97,13 +98,14 @@ class Trial(proto.Message): client_id (str): Output only. The identifier of the client that originally requested this Trial. Each client is identified by a unique - client_id. When a client asks for a suggestion, Vizier will - assign it a Trial. The client should evaluate the Trial, - complete it, and report back to Vizier. If suggestion is - asked again by same client_id before the Trial is completed, - the same Trial will be returned. Multiple clients with - different client_ids can ask for suggestions simultaneously, - each of them will get their own Trial. + client_id. When a client asks for a suggestion, Vertex AI + Vizier will assign it a Trial. 
The client should evaluate + the Trial, complete it, and report back to Vertex AI Vizier. + If suggestion is asked again by same client_id before the + Trial is completed, the same Trial will be returned. + Multiple clients with different client_ids can ask for + suggestions simultaneously, each of them will get their own + Trial. infeasible_reason (str): Output only. A human readable string describing why the Trial is infeasible. This is set only if Trial state is @@ -202,9 +204,9 @@ class StudySpec(proto.Message): The search algorithm specified for the Study. observation_noise (google.cloud.aiplatform_v1.types.StudySpec.ObservationNoise): The observation noise level of the study. - Currently only supported by the Vizier service. - Not supported by HyperparamterTuningJob or - TrainingPipeline. + Currently only supported by the Vertex AI Vizier + service. Not supported by HyperparamterTuningJob + or TrainingPipeline. measurement_selection_type (google.cloud.aiplatform_v1.types.StudySpec.MeasurementSelectionType): Describe which measurement selection type will be used @@ -329,8 +331,8 @@ class DoubleValueSpec(proto.Message): to be a relatively good starting point. Unset value signals that there is no offered starting point. - Currently only supported by the Vizier service. Not - supported by HyperparamterTuningJob or TrainingPipeline. + Currently only supported by the Vertex AI Vizier service. + Not supported by HyperparamterTuningJob or TrainingPipeline. This field is a member of `oneof`_ ``_default_value``. """ @@ -354,8 +356,8 @@ class IntegerValueSpec(proto.Message): to be a relatively good starting point. Unset value signals that there is no offered starting point. - Currently only supported by the Vizier service. Not - supported by HyperparamterTuningJob or TrainingPipeline. + Currently only supported by the Vertex AI Vizier service. + Not supported by HyperparamterTuningJob or TrainingPipeline. This field is a member of `oneof`_ ``_default_value``. 
""" diff --git a/google/cloud/aiplatform_v1/types/training_pipeline.py b/google/cloud/aiplatform_v1/types/training_pipeline.py index e7d32effd4..93f4449ece 100644 --- a/google/cloud/aiplatform_v1/types/training_pipeline.py +++ b/google/cloud/aiplatform_v1/types/training_pipeline.py @@ -33,6 +33,7 @@ "FilterSplit", "PredefinedSplit", "TimestampSplit", + "StratifiedSplit", }, ) @@ -206,6 +207,12 @@ class InputDataConfig(proto.Message): Split based on the timestamp of the input data pieces. + This field is a member of `oneof`_ ``split``. + stratified_split (google.cloud.aiplatform_v1.types.StratifiedSplit): + Supported only for tabular Datasets. + Split based on the distribution of the specified + column. + This field is a member of `oneof`_ ``split``. gcs_destination (google.cloud.aiplatform_v1.types.GcsDestination): The Cloud Storage location where the training data is to be @@ -322,6 +329,9 @@ class InputDataConfig(proto.Message): timestamp_split = proto.Field( proto.MESSAGE, number=5, oneof="split", message="TimestampSplit", ) + stratified_split = proto.Field( + proto.MESSAGE, number=12, oneof="split", message="StratifiedSplit", + ) gcs_destination = proto.Field( proto.MESSAGE, number=8, oneof="destination", message=io.GcsDestination, ) @@ -456,4 +466,45 @@ class TimestampSplit(proto.Message): key = proto.Field(proto.STRING, number=4,) +class StratifiedSplit(proto.Message): + r"""Assigns input data to the training, validation, and test sets so + that the distribution of values found in the categorical column (as + specified by the ``key`` field) is mirrored within each split. The + fraction values determine the relative sizes of the splits. 
+ + For example, if the specified column has three values, with 50% of + the rows having value "A", 25% value "B", and 25% value "C", and the + split fractions are specified as 80/10/10, then the training set + will constitute 80% of the training data, with about 50% of the + training set rows having the value "A" for the specified column, + about 25% having the value "B", and about 25% having the value "C". + + Only the top 500 occurring values are used; any values not in the + top 500 values are randomly assigned to a split. If less than three + rows contain a specific value, those rows are randomly assigned. + + Supported only for tabular Datasets. + + Attributes: + training_fraction (float): + The fraction of the input data that is to be + used to train the Model. + validation_fraction (float): + The fraction of the input data that is to be + used to validate the Model. + test_fraction (float): + The fraction of the input data that is to be + used to evaluate the Model. + key (str): + Required. The key is a name of one of the + Dataset's data columns. The key provided must be + for a categorical column. 
+ """ + + training_fraction = proto.Field(proto.DOUBLE, number=1,) + validation_fraction = proto.Field(proto.DOUBLE, number=2,) + test_fraction = proto.Field(proto.DOUBLE, number=3,) + key = proto.Field(proto.STRING, number=4,) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/unmanaged_container_model.py b/google/cloud/aiplatform_v1/types/unmanaged_container_model.py similarity index 82% rename from owl-bot-staging/v1/google/cloud/aiplatform_v1/types/unmanaged_container_model.py rename to google/cloud/aiplatform_v1/types/unmanaged_container_model.py index 92788d49c2..07deefc70e 100644 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/unmanaged_container_model.py +++ b/google/cloud/aiplatform_v1/types/unmanaged_container_model.py @@ -19,10 +19,7 @@ __protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'UnmanagedContainerModel', - }, + package="google.cloud.aiplatform.v1", manifest={"UnmanagedContainerModel",}, ) @@ -43,19 +40,12 @@ class UnmanagedContainerModel(proto.Message): Model. 
""" - artifact_uri = proto.Field( - proto.STRING, - number=1, - ) + artifact_uri = proto.Field(proto.STRING, number=1,) predict_schemata = proto.Field( - proto.MESSAGE, - number=2, - message=model.PredictSchemata, + proto.MESSAGE, number=2, message=model.PredictSchemata, ) container_spec = proto.Field( - proto.MESSAGE, - number=3, - message=model.ModelContainerSpec, + proto.MESSAGE, number=3, message=model.ModelContainerSpec, ) diff --git a/google/cloud/aiplatform_v1beta1/__init__.py b/google/cloud/aiplatform_v1beta1/__init__.py index 3c5353af83..801ae9f5b3 100644 --- a/google/cloud/aiplatform_v1beta1/__init__.py +++ b/google/cloud/aiplatform_v1beta1/__init__.py @@ -112,6 +112,7 @@ from .types.event import Event from .types.execution import Execution from .types.explanation import Attribution +from .types.explanation import BlurBaselineConfig from .types.explanation import Explanation from .types.explanation import ExplanationMetadataOverride from .types.explanation import ExplanationParameters @@ -189,6 +190,9 @@ from .types.index_endpoint_service import GetIndexEndpointRequest from .types.index_endpoint_service import ListIndexEndpointsRequest from .types.index_endpoint_service import ListIndexEndpointsResponse +from .types.index_endpoint_service import MutateDeployedIndexOperationMetadata +from .types.index_endpoint_service import MutateDeployedIndexRequest +from .types.index_endpoint_service import MutateDeployedIndexResponse from .types.index_endpoint_service import UndeployIndexOperationMetadata from .types.index_endpoint_service import UndeployIndexRequest from .types.index_endpoint_service import UndeployIndexResponse @@ -447,12 +451,14 @@ from .types.training_pipeline import FractionSplit from .types.training_pipeline import InputDataConfig from .types.training_pipeline import PredefinedSplit +from .types.training_pipeline import StratifiedSplit from .types.training_pipeline import TimestampSplit from .types.training_pipeline import TrainingPipeline from 
.types.types import BoolArray from .types.types import DoubleArray from .types.types import Int64Array from .types.types import StringArray +from .types.unmanaged_container_model import UnmanagedContainerModel from .types.user_action_reference import UserActionReference from .types.value import Value from .types.vizier_service import AddTrialMeasurementRequest @@ -529,6 +535,7 @@ "BatchReadTensorboardTimeSeriesDataResponse", "BigQueryDestination", "BigQuerySource", + "BlurBaselineConfig", "BoolArray", "CancelBatchPredictionJobRequest", "CancelCustomJobRequest", @@ -813,6 +820,9 @@ "ModelMonitoringObjectiveConfig", "ModelMonitoringStatsAnomalies", "ModelServiceClient", + "MutateDeployedIndexOperationMetadata", + "MutateDeployedIndexRequest", + "MutateDeployedIndexResponse", "NearestNeighborSearchOperationMetadata", "PauseModelDeploymentMonitoringJobRequest", "PipelineJob", @@ -866,6 +876,7 @@ "SpecialistPool", "SpecialistPoolServiceClient", "StopTrialRequest", + "StratifiedSplit", "StreamingReadFeatureValuesRequest", "StringArray", "Study", @@ -895,6 +906,7 @@ "UndeployModelOperationMetadata", "UndeployModelRequest", "UndeployModelResponse", + "UnmanagedContainerModel", "UpdateArtifactRequest", "UpdateContextRequest", "UpdateDatasetRequest", diff --git a/google/cloud/aiplatform_v1beta1/gapic_metadata.json b/google/cloud/aiplatform_v1beta1/gapic_metadata.json index d6469e96ee..b584f16b81 100644 --- a/google/cloud/aiplatform_v1beta1/gapic_metadata.json +++ b/google/cloud/aiplatform_v1beta1/gapic_metadata.json @@ -481,6 +481,11 @@ "list_index_endpoints" ] }, + "MutateDeployedIndex": { + "methods": [ + "mutate_deployed_index" + ] + }, "UndeployIndex": { "methods": [ "undeploy_index" @@ -521,6 +526,11 @@ "list_index_endpoints" ] }, + "MutateDeployedIndex": { + "methods": [ + "mutate_deployed_index" + ] + }, "UndeployIndex": { "methods": [ "undeploy_index" diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py 
b/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py index 41527577f7..9511c58a96 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py @@ -191,6 +191,7 @@ async def create_endpoint( *, parent: str = None, endpoint: gca_endpoint.Endpoint = None, + endpoint_id: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -214,6 +215,21 @@ async def create_endpoint( This corresponds to the ``endpoint`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + endpoint_id (:class:`str`): + Immutable. The ID to use for endpoint, which will become + the final component of the endpoint resource name. If + not provided, Vertex AI will generate a value for this + ID. + + This value should be 1-10 characters, and valid + characters are /[0-9]/. When using HTTP/JSON, this field + is populated based on a query string argument, such as + ``?endpoint_id=12345``. This is the fallback for fields + that are not included in either the URI or the body. + + This corresponds to the ``endpoint_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -231,7 +247,7 @@ async def create_endpoint( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, endpoint]) + has_flattened_params = any([parent, endpoint, endpoint_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -246,6 +262,8 @@ async def create_endpoint( request.parent = parent if endpoint is not None: request.endpoint = endpoint + if endpoint_id is not None: + request.endpoint_id = endpoint_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py index eb4bdaf03c..247fc94dcd 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py @@ -424,6 +424,7 @@ def create_endpoint( *, parent: str = None, endpoint: gca_endpoint.Endpoint = None, + endpoint_id: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -447,6 +448,21 @@ def create_endpoint( This corresponds to the ``endpoint`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + endpoint_id (str): + Immutable. The ID to use for endpoint, which will become + the final component of the endpoint resource name. If + not provided, Vertex AI will generate a value for this + ID. + + This value should be 1-10 characters, and valid + characters are /[0-9]/. When using HTTP/JSON, this field + is populated based on a query string argument, such as + ``?endpoint_id=12345``. This is the fallback for fields + that are not included in either the URI or the body. + + This corresponds to the ``endpoint_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. @@ -464,7 +480,7 @@ def create_endpoint( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, endpoint]) + has_flattened_params = any([parent, endpoint, endpoint_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -483,6 +499,8 @@ def create_endpoint( request.parent = parent if endpoint is not None: request.endpoint = endpoint + if endpoint_id is not None: + request.endpoint_id = endpoint_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py index 8038879ee8..cbe4cf700b 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py @@ -198,6 +198,7 @@ async def create_featurestore( *, parent: str = None, featurestore: gca_featurestore.Featurestore = None, + featurestore_id: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -222,6 +223,21 @@ async def create_featurestore( This corresponds to the ``featurestore`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + featurestore_id (:class:`str`): + Required. The ID to use for this Featurestore, which + will become the final component of the Featurestore's + resource name. + + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within the project and + location. 
+ + This corresponds to the ``featurestore_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -232,16 +248,16 @@ async def create_featurestore( google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` - Featurestore configuration information on how the - Featurestore is configured. + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing, + storing, and serving ML features. The Featurestore is + a top-level container for your features and their + values. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, featurestore]) + has_flattened_params = any([parent, featurestore, featurestore_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -256,6 +272,8 @@ async def create_featurestore( request.parent = parent if featurestore is not None: request.featurestore = featurestore + if featurestore_id is not None: + request.featurestore_id = featurestore_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -315,9 +333,11 @@ async def get_featurestore( Returns: google.cloud.aiplatform_v1beta1.types.Featurestore: - Featurestore configuration - information on how the Featurestore is - configured. + Vertex AI Feature Store provides a + centralized repository for organizing, + storing, and serving ML features. 
The + Featurestore is a top-level container + for your features and their values. """ # Create or coerce a protobuf request object. @@ -490,10 +510,10 @@ async def update_featurestore( google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` - Featurestore configuration information on how the - Featurestore is configured. + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing, + storing, and serving ML features. The Featurestore is + a top-level container for your features and their + values. """ # Create or coerce a protobuf request object. @@ -659,6 +679,7 @@ async def create_entity_type( *, parent: str = None, entity_type: gca_entity_type.EntityType = None, + entity_type_id: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -682,6 +703,20 @@ async def create_entity_type( This corresponds to the ``entity_type`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + entity_type_id (:class:`str`): + Required. The ID to use for the EntityType, which will + become the final component of the EntityType's resource + name. + + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within a featurestore. + + This corresponds to the ``entity_type_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -701,7 +736,7 @@ async def create_entity_type( # Create or coerce a protobuf request object. 
# Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, entity_type]) + has_flattened_params = any([parent, entity_type, entity_type_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -716,6 +751,8 @@ async def create_entity_type( request.parent = parent if entity_type is not None: request.entity_type = entity_type + if entity_type_id is not None: + request.entity_type_id = entity_type_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -1115,6 +1152,7 @@ async def create_feature( *, parent: str = None, feature: gca_feature.Feature = None, + feature_id: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -1138,6 +1176,20 @@ async def create_feature( This corresponds to the ``feature`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + feature_id (:class:`str`): + Required. The ID to use for the Feature, which will + become the final component of the Feature's resource + name. + + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within an EntityType. + + This corresponds to the ``feature_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1156,7 +1208,7 @@ async def create_feature( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, feature]) + has_flattened_params = any([parent, feature, feature_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1171,6 +1223,8 @@ async def create_feature( request.parent = parent if feature is not None: request.feature = feature + if feature_id is not None: + request.feature_id = feature_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py index 562cd5a808..a149a9c488 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py @@ -426,6 +426,7 @@ def create_featurestore( *, parent: str = None, featurestore: gca_featurestore.Featurestore = None, + featurestore_id: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -450,6 +451,21 @@ def create_featurestore( This corresponds to the ``featurestore`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + featurestore_id (str): + Required. The ID to use for this Featurestore, which + will become the final component of the Featurestore's + resource name. + + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within the project and + location. + + This corresponds to the ``featurestore_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
@@ -460,16 +476,16 @@ def create_featurestore( google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` - Featurestore configuration information on how the - Featurestore is configured. + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing, + storing, and serving ML features. The Featurestore is + a top-level container for your features and their + values. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, featurestore]) + has_flattened_params = any([parent, featurestore, featurestore_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -488,6 +504,8 @@ def create_featurestore( request.parent = parent if featurestore is not None: request.featurestore = featurestore + if featurestore_id is not None: + request.featurestore_id = featurestore_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -543,9 +561,11 @@ def get_featurestore( Returns: google.cloud.aiplatform_v1beta1.types.Featurestore: - Featurestore configuration - information on how the Featurestore is - configured. + Vertex AI Feature Store provides a + centralized repository for organizing, + storing, and serving ML features. The + Featurestore is a top-level container + for your features and their values. """ # Create or coerce a protobuf request object. @@ -718,10 +738,10 @@ def update_featurestore( google.api_core.operation.Operation: An object representing a long-running operation. 
- The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` - Featurestore configuration information on how the - Featurestore is configured. + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing, + storing, and serving ML features. The Featurestore is + a top-level container for your features and their + values. """ # Create or coerce a protobuf request object. @@ -887,6 +907,7 @@ def create_entity_type( *, parent: str = None, entity_type: gca_entity_type.EntityType = None, + entity_type_id: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -910,6 +931,20 @@ def create_entity_type( This corresponds to the ``entity_type`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + entity_type_id (str): + Required. The ID to use for the EntityType, which will + become the final component of the EntityType's resource + name. + + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within a featurestore. + + This corresponds to the ``entity_type_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -929,7 +964,7 @@ def create_entity_type( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, entity_type]) + has_flattened_params = any([parent, entity_type, entity_type_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -948,6 +983,8 @@ def create_entity_type( request.parent = parent if entity_type is not None: request.entity_type = entity_type + if entity_type_id is not None: + request.entity_type_id = entity_type_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -1343,6 +1380,7 @@ def create_feature( *, parent: str = None, feature: gca_feature.Feature = None, + feature_id: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -1366,6 +1404,20 @@ def create_feature( This corresponds to the ``feature`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + feature_id (str): + Required. The ID to use for the Feature, which will + become the final component of the Feature's resource + name. + + This value may be up to 60 characters, and valid + characters are ``[a-z0-9_]``. The first character cannot + be a number. + + The value must be unique within an EntityType. + + This corresponds to the ``feature_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1384,7 +1436,7 @@ def create_feature( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, feature]) + has_flattened_params = any([parent, feature, feature_id]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1403,6 +1455,8 @@ def create_feature( request.parent = parent if feature is not None: request.feature = feature + if feature_id is not None: + request.feature_id = feature_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py index 50fe8ae2ad..a1e5aa81b8 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py @@ -798,6 +798,105 @@ async def undeploy_index( # Done; return the response. return response + async def mutate_deployed_index( + self, + request: Union[index_endpoint_service.MutateDeployedIndexRequest, dict] = None, + *, + index_endpoint: str = None, + deployed_index: gca_index_endpoint.DeployedIndex = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Update an existing DeployedIndex under an + IndexEndpoint. + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.MutateDeployedIndexRequest, dict]): + The request object. Request message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. + index_endpoint (:class:`str`): + Required. The name of the IndexEndpoint resource into + which to deploy an Index. 
Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_index (:class:`google.cloud.aiplatform_v1beta1.types.DeployedIndex`): + Required. The DeployedIndex to be updated within the + IndexEndpoint. Currently, the updatable fields are + [DeployedIndex][automatic_resources] and + [DeployedIndex][dedicated_resources] + + This corresponds to the ``deployed_index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.MutateDeployedIndexResponse` + Response message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([index_endpoint, deployed_index]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = index_endpoint_service.MutateDeployedIndexRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if index_endpoint is not None: + request.index_endpoint = index_endpoint + if deployed_index is not None: + request.deployed_index = deployed_index + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.mutate_deployed_index, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("index_endpoint", request.index_endpoint),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + index_endpoint_service.MutateDeployedIndexResponse, + metadata_type=index_endpoint_service.MutateDeployedIndexOperationMetadata, + ) + + # Done; return the response. + return response + async def __aenter__(self): return self diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py index c93a0cd4e3..49f4dc9da6 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py @@ -1003,6 +1003,105 @@ def undeploy_index( # Done; return the response. return response + def mutate_deployed_index( + self, + request: Union[index_endpoint_service.MutateDeployedIndexRequest, dict] = None, + *, + index_endpoint: str = None, + deployed_index: gca_index_endpoint.DeployedIndex = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Update an existing DeployedIndex under an + IndexEndpoint. 
+ + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.MutateDeployedIndexRequest, dict]): + The request object. Request message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. + index_endpoint (str): + Required. The name of the IndexEndpoint resource into + which to deploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + + This corresponds to the ``index_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex): + Required. The DeployedIndex to be updated within the + IndexEndpoint. Currently, the updatable fields are + [DeployedIndex][automatic_resources] and + [DeployedIndex][dedicated_resources] + + This corresponds to the ``deployed_index`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.aiplatform_v1beta1.types.MutateDeployedIndexResponse` + Response message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([index_endpoint, deployed_index]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a index_endpoint_service.MutateDeployedIndexRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, index_endpoint_service.MutateDeployedIndexRequest): + request = index_endpoint_service.MutateDeployedIndexRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if index_endpoint is not None: + request.index_endpoint = index_endpoint + if deployed_index is not None: + request.deployed_index = deployed_index + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.mutate_deployed_index] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("index_endpoint", request.index_endpoint),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + index_endpoint_service.MutateDeployedIndexResponse, + metadata_type=index_endpoint_service.MutateDeployedIndexOperationMetadata, + ) + + # Done; return the response. 
+ return response + def __enter__(self): return self diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py index ad234b4d93..729e32879b 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py @@ -152,6 +152,11 @@ def _prep_wrapped_messages(self, client_info): self.undeploy_index: gapic_v1.method.wrap_method( self.undeploy_index, default_timeout=5.0, client_info=client_info, ), + self.mutate_deployed_index: gapic_v1.method.wrap_method( + self.mutate_deployed_index, + default_timeout=None, + client_info=client_info, + ), } def close(self): @@ -237,5 +242,14 @@ def undeploy_index( ]: raise NotImplementedError() + @property + def mutate_deployed_index( + self, + ) -> Callable[ + [index_endpoint_service.MutateDeployedIndexRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + __all__ = ("IndexEndpointServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py index 59eff52bc4..5704bc41f4 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py @@ -446,6 +446,35 @@ def undeploy_index( ) return self._stubs["undeploy_index"] + @property + def mutate_deployed_index( + self, + ) -> Callable[ + [index_endpoint_service.MutateDeployedIndexRequest], operations_pb2.Operation + ]: + r"""Return a callable for the mutate deployed index method over gRPC. + + Update an existing DeployedIndex under an + IndexEndpoint. 
+ + Returns: + Callable[[~.MutateDeployedIndexRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "mutate_deployed_index" not in self._stubs: + self._stubs["mutate_deployed_index"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.IndexEndpointService/MutateDeployedIndex", + request_serializer=index_endpoint_service.MutateDeployedIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["mutate_deployed_index"] + def close(self): self.grpc_channel.close() diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py index 2aa9a4765e..e8b2c2ccaf 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py @@ -454,6 +454,36 @@ def undeploy_index( ) return self._stubs["undeploy_index"] + @property + def mutate_deployed_index( + self, + ) -> Callable[ + [index_endpoint_service.MutateDeployedIndexRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the mutate deployed index method over gRPC. + + Update an existing DeployedIndex under an + IndexEndpoint. + + Returns: + Callable[[~.MutateDeployedIndexRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "mutate_deployed_index" not in self._stubs: + self._stubs["mutate_deployed_index"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.IndexEndpointService/MutateDeployedIndex", + request_serializer=index_endpoint_service.MutateDeployedIndexRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["mutate_deployed_index"] + def close(self): return self.grpc_channel.close() diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py index 4aae09222b..6022a7c16a 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py @@ -63,6 +63,7 @@ from google.cloud.aiplatform_v1beta1.types import model_monitoring from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import study +from google.cloud.aiplatform_v1beta1.types import unmanaged_container_model from google.protobuf import duration_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore from google.protobuf import field_mask_pb2 # type: ignore @@ -1976,7 +1977,7 @@ async def search_model_deployment_monitoring_stats_anomalies( should not be set. deployed_model_id (:class:`str`): Required. The DeployedModel ID of the - [google.cloud.aiplatform.master.ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. + [ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. 
This corresponds to the ``deployed_model_id`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/job_service/client.py b/google/cloud/aiplatform_v1beta1/services/job_service/client.py index 6a3ab9b01c..57d840f33e 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/client.py @@ -66,6 +66,7 @@ from google.cloud.aiplatform_v1beta1.types import model_monitoring from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import study +from google.cloud.aiplatform_v1beta1.types import unmanaged_container_model from google.protobuf import duration_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore from google.protobuf import field_mask_pb2 # type: ignore @@ -2337,7 +2338,7 @@ def search_model_deployment_monitoring_stats_anomalies( should not be set. deployed_model_id (str): Required. The DeployedModel ID of the - [google.cloud.aiplatform.master.ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. + [ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. This corresponds to the ``deployed_model_id`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py index f8a3d626e7..028e8bb41a 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py @@ -528,8 +528,13 @@ async def delete_model( ) -> operation_async.AsyncOperation: r"""Deletes a Model. - Model can only be deleted if there are no [DeployedModels][] - created from it. 
+ A model cannot be deleted if any + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] resource + has a + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + based on the model in its + [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] + field. Args: request (Union[google.cloud.aiplatform_v1beta1.types.DeleteModelRequest, dict]): @@ -623,7 +628,7 @@ async def export_model( timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: - r"""Exports a trained, exportable, Model to a location specified by + r"""Exports a trained, exportable Model to a location specified by the user. A Model is considered to be exportable if it has at least one [supported export format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats]. diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/client.py b/google/cloud/aiplatform_v1beta1/services/model_service/client.py index 0952ab848e..1ab512337a 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/client.py @@ -780,8 +780,13 @@ def delete_model( ) -> gac_operation.Operation: r"""Deletes a Model. - Model can only be deleted if there are no [DeployedModels][] - created from it. + A model cannot be deleted if any + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] resource + has a + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + based on the model in its + [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] + field. Args: request (Union[google.cloud.aiplatform_v1beta1.types.DeleteModelRequest, dict]): @@ -875,7 +880,7 @@ def export_model( timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gac_operation.Operation: - r"""Exports a trained, exportable, Model to a location specified by + r"""Exports a trained, exportable Model to a location specified by the user. 
A Model is considered to be exportable if it has at least one [supported export format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats]. diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py index 5dc6a638aa..4e4d32fe03 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py @@ -356,8 +356,13 @@ def delete_model( Deletes a Model. - Model can only be deleted if there are no [DeployedModels][] - created from it. + A model cannot be deleted if any + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] resource + has a + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + based on the model in its + [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] + field. Returns: Callable[[~.DeleteModelRequest], @@ -383,7 +388,7 @@ def export_model( ) -> Callable[[model_service.ExportModelRequest], operations_pb2.Operation]: r"""Return a callable for the export model method over gRPC. - Exports a trained, exportable, Model to a location specified by + Exports a trained, exportable Model to a location specified by the user. A Model is considered to be exportable if it has at least one [supported export format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats]. diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py index 0ac844c007..f3fa67b56d 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py @@ -368,8 +368,13 @@ def delete_model( Deletes a Model. - Model can only be deleted if there are no [DeployedModels][] - created from it. 
+ A model cannot be deleted if any + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] resource + has a + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + based on the model in its + [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] + field. Returns: Callable[[~.DeleteModelRequest], @@ -397,7 +402,7 @@ def export_model( ]: r"""Return a callable for the export model method over gRPC. - Exports a trained, exportable, Model to a location specified by + Exports a trained, exportable Model to a location specified by the user. A Model is considered to be exportable if it has at least one [supported export format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats]. diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py index fdcd22c07c..c31048b9eb 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py @@ -57,7 +57,7 @@ class PipelineServiceAsyncClient: """A service for creating and managing Vertex AI's pipelines. This includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex + custom training) and ``PipelineJob`` resources (used for Vertex AI Pipelines). """ diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py index 593e2b7edb..35c2ffab76 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py @@ -93,7 +93,7 @@ def get_transport_class(cls, label: str = None,) -> Type[PipelineServiceTranspor class PipelineServiceClient(metaclass=PipelineServiceClientMeta): """A service for creating and managing Vertex AI's pipelines. 
This includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex + custom training) and ``PipelineJob`` resources (used for Vertex AI Pipelines). """ diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py index 01002e8b08..372e193e47 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py @@ -42,7 +42,7 @@ class PipelineServiceGrpcTransport(PipelineServiceTransport): A service for creating and managing Vertex AI's pipelines. This includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex + custom training) and ``PipelineJob`` resources (used for Vertex AI Pipelines). This class defines the same methods as the primary client, so the diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py index 20e90dd7fa..d5c7e82b30 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py @@ -43,7 +43,7 @@ class PipelineServiceGrpcAsyncIOTransport(PipelineServiceTransport): A service for creating and managing Vertex AI's pipelines. This includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex + custom training) and ``PipelineJob`` resources (used for Vertex AI Pipelines). 
This class defines the same methods as the primary client, so the diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py index 85bd6291ce..fd0677b922 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py @@ -286,8 +286,17 @@ async def raw_predict( timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> httpbody_pb2.HttpBody: - r"""Perform an online prediction with arbitrary http - payload. + r"""Perform an online prediction with an arbitrary HTTP payload. + + The response includes the following HTTP headers: + + - ``X-Vertex-AI-Endpoint-Id``: ID of the + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that + served this prediction. + + - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + that served this prediction. Args: request (Union[google.cloud.aiplatform_v1beta1.types.RawPredictRequest, dict]): diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py index d8e8694054..020817c986 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py @@ -494,8 +494,17 @@ def raw_predict( timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> httpbody_pb2.HttpBody: - r"""Perform an online prediction with arbitrary http - payload. + r"""Perform an online prediction with an arbitrary HTTP payload. + + The response includes the following HTTP headers: + + - ``X-Vertex-AI-Endpoint-Id``: ID of the + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that + served this prediction. 
+ + - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + that served this prediction. Args: request (Union[google.cloud.aiplatform_v1beta1.types.RawPredictRequest, dict]): diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py index 298ab7d052..e911abba1e 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py @@ -260,8 +260,17 @@ def raw_predict( ) -> Callable[[prediction_service.RawPredictRequest], httpbody_pb2.HttpBody]: r"""Return a callable for the raw predict method over gRPC. - Perform an online prediction with arbitrary http - payload. + Perform an online prediction with an arbitrary HTTP payload. + + The response includes the following HTTP headers: + + - ``X-Vertex-AI-Endpoint-Id``: ID of the + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that + served this prediction. + + - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + that served this prediction. Returns: Callable[[~.RawPredictRequest], diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py index af62a6feb0..4c288670dc 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py @@ -265,8 +265,17 @@ def raw_predict( ]: r"""Return a callable for the raw predict method over gRPC. - Perform an online prediction with arbitrary http - payload. + Perform an online prediction with an arbitrary HTTP payload. 
+ + The response includes the following HTTP headers: + + - ``X-Vertex-AI-Endpoint-Id``: ID of the + [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that + served this prediction. + + - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's + [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] + that served this prediction. Returns: Callable[[~.RawPredictRequest], diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py index aa54f6bab5..9ba5f46b14 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py @@ -44,8 +44,8 @@ class VizierServiceAsyncClient: - """Vertex Vizier API. - Vizier service is a GCP service to solve blackbox optimization + """Vertex AI Vizier API. + Vertex AI Vizier is a service to solve blackbox optimization problems, such as tuning machine learning hyperparameters and searching over deep learning architectures. """ @@ -219,7 +219,9 @@ async def create_study( Returns: google.cloud.aiplatform_v1beta1.types.Study: + LINT.IfChange A message representing a Study. + """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have @@ -290,7 +292,9 @@ async def get_study( Returns: google.cloud.aiplatform_v1beta1.types.Study: + LINT.IfChange A message representing a Study. + """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have @@ -508,7 +512,9 @@ async def lookup_study( Returns: google.cloud.aiplatform_v1beta1.types.Study: + LINT.IfChange A message representing a Study. + """ # Create or coerce a protobuf request object. 
# Sanity check: If we got a request object, we should *not* have @@ -556,7 +562,7 @@ async def suggest_trials( metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Adds one or more Trials to a Study, with parameter values - suggested by Vertex Vizier. Returns a long-running operation + suggested by Vertex AI Vizier. Returns a long-running operation associated with the generation of Trial suggestions. When this long-running operation succeeds, it will contain a [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py index 818462a4ad..94f0134400 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py @@ -78,8 +78,8 @@ def get_transport_class(cls, label: str = None,) -> Type[VizierServiceTransport] class VizierServiceClient(metaclass=VizierServiceClientMeta): - """Vertex Vizier API. - Vizier service is a GCP service to solve blackbox optimization + """Vertex AI Vizier API. + Vertex AI Vizier is a service to solve blackbox optimization problems, such as tuning machine learning hyperparameters and searching over deep learning architectures. """ @@ -437,7 +437,9 @@ def create_study( Returns: google.cloud.aiplatform_v1beta1.types.Study: + LINT.IfChange A message representing a Study. + """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have @@ -508,7 +510,9 @@ def get_study( Returns: google.cloud.aiplatform_v1beta1.types.Study: + LINT.IfChange A message representing a Study. + """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have @@ -726,7 +730,9 @@ def lookup_study( Returns: google.cloud.aiplatform_v1beta1.types.Study: + LINT.IfChange A message representing a Study. 
+ """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have @@ -774,7 +780,7 @@ def suggest_trials( metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Adds one or more Trials to a Study, with parameter values - suggested by Vertex Vizier. Returns a long-running operation + suggested by Vertex AI Vizier. Returns a long-running operation associated with the generation of Trial suggestions. When this long-running operation succeeds, it will contain a [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py index 0c4d387596..5cf7cbaee1 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py @@ -36,8 +36,8 @@ class VizierServiceGrpcTransport(VizierServiceTransport): """gRPC backend transport for VizierService. - Vertex Vizier API. - Vizier service is a GCP service to solve blackbox optimization + Vertex AI Vizier API. + Vertex AI Vizier is a service to solve blackbox optimization problems, such as tuning machine learning hyperparameters and searching over deep learning architectures. @@ -388,7 +388,7 @@ def suggest_trials( r"""Return a callable for the suggest trials method over gRPC. Adds one or more Trials to a Study, with parameter values - suggested by Vertex Vizier. Returns a long-running operation + suggested by Vertex AI Vizier. Returns a long-running operation associated with the generation of Trial suggestions. When this long-running operation succeeds, it will contain a [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. 
diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py index 2168a033d2..7fdb740e87 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py @@ -37,8 +37,8 @@ class VizierServiceGrpcAsyncIOTransport(VizierServiceTransport): """gRPC AsyncIO backend transport for VizierService. - Vertex Vizier API. - Vizier service is a GCP service to solve blackbox optimization + Vertex AI Vizier API. + Vertex AI Vizier is a service to solve blackbox optimization problems, such as tuning machine learning hyperparameters and searching over deep learning architectures. @@ -397,7 +397,7 @@ def suggest_trials( r"""Return a callable for the suggest trials method over gRPC. Adds one or more Trials to a Study, with parameter values - suggested by Vertex Vizier. Returns a long-running operation + suggested by Vertex AI Vizier. Returns a long-running operation associated with the generation of Trial suggestions. When this long-running operation succeeds, it will contain a [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. 
diff --git a/google/cloud/aiplatform_v1beta1/types/__init__.py b/google/cloud/aiplatform_v1beta1/types/__init__.py index b5ce0f36fd..fcd3ca5a42 100644 --- a/google/cloud/aiplatform_v1beta1/types/__init__.py +++ b/google/cloud/aiplatform_v1beta1/types/__init__.py @@ -88,6 +88,7 @@ from .execution import Execution from .explanation import ( Attribution, + BlurBaselineConfig, Explanation, ExplanationMetadataOverride, ExplanationParameters, @@ -175,6 +176,9 @@ GetIndexEndpointRequest, ListIndexEndpointsRequest, ListIndexEndpointsResponse, + MutateDeployedIndexOperationMetadata, + MutateDeployedIndexRequest, + MutateDeployedIndexResponse, UndeployIndexOperationMetadata, UndeployIndexRequest, UndeployIndexResponse, @@ -461,6 +465,7 @@ FractionSplit, InputDataConfig, PredefinedSplit, + StratifiedSplit, TimestampSplit, TrainingPipeline, ) @@ -470,6 +475,7 @@ Int64Array, StringArray, ) +from .unmanaged_container_model import UnmanagedContainerModel from .user_action_reference import UserActionReference from .value import Value from .vizier_service import ( @@ -561,6 +567,7 @@ "Event", "Execution", "Attribution", + "BlurBaselineConfig", "Explanation", "ExplanationMetadataOverride", "ExplanationParameters", @@ -638,6 +645,9 @@ "GetIndexEndpointRequest", "ListIndexEndpointsRequest", "ListIndexEndpointsResponse", + "MutateDeployedIndexOperationMetadata", + "MutateDeployedIndexRequest", + "MutateDeployedIndexResponse", "UndeployIndexOperationMetadata", "UndeployIndexRequest", "UndeployIndexResponse", @@ -888,12 +898,14 @@ "FractionSplit", "InputDataConfig", "PredefinedSplit", + "StratifiedSplit", "TimestampSplit", "TrainingPipeline", "BoolArray", "DoubleArray", "Int64Array", "StringArray", + "UnmanagedContainerModel", "UserActionReference", "Value", "AddTrialMeasurementRequest", diff --git a/google/cloud/aiplatform_v1beta1/types/artifact.py b/google/cloud/aiplatform_v1beta1/types/artifact.py index 31657f0f31..d70d6ace76 100644 --- 
a/google/cloud/aiplatform_v1beta1/types/artifact.py +++ b/google/cloud/aiplatform_v1beta1/types/artifact.py @@ -62,9 +62,9 @@ class Artifact(proto.Message): The state of this Artifact. This is a property of the Artifact, and does not imply or capture any ongoing process. This property is - managed by clients (such as Vertex Pipelines), - and the system does not prescribe or check the - validity of state transitions. + managed by clients (such as Vertex AI + Pipelines), and the system does not prescribe or + check the validity of state transitions. schema_title (str): The title of the schema describing the metadata. diff --git a/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py b/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py index 0f8d9cb573..80829d444e 100644 --- a/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py +++ b/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py @@ -26,6 +26,9 @@ from google.cloud.aiplatform_v1beta1.types import ( manual_batch_tuning_parameters as gca_manual_batch_tuning_parameters, ) +from google.cloud.aiplatform_v1beta1.types import ( + unmanaged_container_model as gca_unmanaged_container_model, +) from google.protobuf import struct_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore from google.rpc import status_pb2 # type: ignore @@ -53,11 +56,16 @@ class BatchPredictionJob(proto.Message): Required. The user-defined name of this BatchPredictionJob. model (str): - Required. The name of the Model that produces - the predictions via this job, must share the - same ancestor Location. Starting this job has no - impact on any existing deployments of the Model - and their resources. + The name of the Model resource that produces the predictions + via this job, must share the same ancestor Location. + Starting this job has no impact on any existing deployments + of the Model and their resources. Exactly one of model and + unmanaged_container_model must be set.
+ unmanaged_container_model (google.cloud.aiplatform_v1beta1.types.UnmanagedContainerModel): + Contains model information necessary to perform batch + prediction without requiring uploading to model registry. + Exactly one of model and unmanaged_container_model must be + set. input_config (google.cloud.aiplatform_v1beta1.types.BatchPredictionJob.InputConfig): Required. Input configuration of the instances on which predictions are performed. The schema of any single instance @@ -362,6 +370,11 @@ class OutputInfo(proto.Message): name = proto.Field(proto.STRING, number=1,) display_name = proto.Field(proto.STRING, number=2,) model = proto.Field(proto.STRING, number=3,) + unmanaged_container_model = proto.Field( + proto.MESSAGE, + number=28, + message=gca_unmanaged_container_model.UnmanagedContainerModel, + ) input_config = proto.Field(proto.MESSAGE, number=4, message=InputConfig,) model_parameters = proto.Field(proto.MESSAGE, number=5, message=struct_pb2.Value,) output_config = proto.Field(proto.MESSAGE, number=6, message=OutputConfig,) diff --git a/google/cloud/aiplatform_v1beta1/types/custom_job.py b/google/cloud/aiplatform_v1beta1/types/custom_job.py index dbd7c960d7..7ec2a92c03 100644 --- a/google/cloud/aiplatform_v1beta1/types/custom_job.py +++ b/google/cloud/aiplatform_v1beta1/types/custom_job.py @@ -147,9 +147,12 @@ class CustomJobSpec(proto.Message): {project} is a project number, as in ``12345``, and {network} is a network name. - Private services access must already be configured for the - network. If left unspecified, the job is not peered with any - network. + To specify this field, you must have already `configured VPC + Network Peering for Vertex + AI `__. + + If this field is left unspecified, the job is not peered + with any network. base_output_directory (google.cloud.aiplatform_v1beta1.types.GcsDestination): The Cloud Storage location to store the output of this CustomJob or HyperparameterTuningJob. 
For diff --git a/google/cloud/aiplatform_v1beta1/types/endpoint.py b/google/cloud/aiplatform_v1beta1/types/endpoint.py index 7f04d9907c..4e6981e44e 100644 --- a/google/cloud/aiplatform_v1beta1/types/endpoint.py +++ b/google/cloud/aiplatform_v1beta1/types/endpoint.py @@ -85,17 +85,31 @@ class Endpoint(proto.Message): this key. network (str): The full name of the Google Compute Engine - `network `__ + `network `__ to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. + Only one of the fields, + [network][google.cloud.aiplatform.v1beta1.Endpoint.network] + or + [enable_private_service_connect][google.cloud.aiplatform.v1beta1.Endpoint.enable_private_service_connect], + can be set. + `Format `__: - projects/{project}/global/networks/{network}. Where - {project} is a project number, as in '12345', and {network} - is network name. + ``projects/{project}/global/networks/{network}``. Where + ``{project}`` is a project number, as in ``12345``, and + ``{network}`` is network name. + enable_private_service_connect (bool): + If true, expose the Endpoint via private service connect. + + Only one of the fields, + [network][google.cloud.aiplatform.v1beta1.Endpoint.network] + or + [enable_private_service_connect][google.cloud.aiplatform.v1beta1.Endpoint.enable_private_service_connect], + can be set. model_deployment_monitoring_job (str): Output only. 
Resource name of the Model Monitoring job associated with this Endpoint if monitoring is enabled by @@ -118,6 +132,7 @@ class Endpoint(proto.Message): proto.MESSAGE, number=10, message=gca_encryption_spec.EncryptionSpec, ) network = proto.Field(proto.STRING, number=13,) + enable_private_service_connect = proto.Field(proto.BOOL, number=17,) model_deployment_monitoring_job = proto.Field(proto.STRING, number=14,) @@ -146,7 +161,11 @@ class DeployedModel(proto.Message): This field is a member of `oneof`_ ``prediction_resources``. id (str): - Output only. The ID of the DeployedModel. + Immutable. The ID of the DeployedModel. If not provided upon + deployment, Vertex AI will generate a value for this ID. + + This value should be 1-10 characters, and valid characters + are /[0-9]/. model (str): Required. The name of the Model that this is the deployment of. Note that the Model may be in @@ -239,8 +258,10 @@ class DeployedModel(proto.Message): class PrivateEndpoints(proto.Message): - r"""PrivateEndpoints is used to provide paths for users to send - requests via private services access. + r"""PrivateEndpoints proto is used to provide paths for users to send + requests privately. To send request via private service access, use + predict_http_uri, explain_http_uri or health_http_uri. To send + request via private service connect, use service_attachment. Attributes: predict_http_uri (str): @@ -252,11 +273,16 @@ class PrivateEndpoints(proto.Message): health_http_uri (str): Output only. Http(s) path to send health check requests. + service_attachment (str): + Output only. The name of the service + attachment resource. Populated if private + service connect is enabled. 
""" predict_http_uri = proto.Field(proto.STRING, number=1,) explain_http_uri = proto.Field(proto.STRING, number=2,) health_http_uri = proto.Field(proto.STRING, number=3,) + service_attachment = proto.Field(proto.STRING, number=4,) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/endpoint_service.py b/google/cloud/aiplatform_v1beta1/types/endpoint_service.py index 94b28acec2..a21d1ca933 100644 --- a/google/cloud/aiplatform_v1beta1/types/endpoint_service.py +++ b/google/cloud/aiplatform_v1beta1/types/endpoint_service.py @@ -51,10 +51,21 @@ class CreateEndpointRequest(proto.Message): ``projects/{project}/locations/{location}`` endpoint (google.cloud.aiplatform_v1beta1.types.Endpoint): Required. The Endpoint to create. + endpoint_id (str): + Immutable. The ID to use for endpoint, which will become the + final component of the endpoint resource name. If not + provided, Vertex AI will generate a value for this ID. + + This value should be 1-10 characters, and valid characters + are /[0-9]/. When using HTTP/JSON, this field is populated + based on a query string argument, such as + ``?endpoint_id=12345``. This is the fallback for fields that + are not included in either the URI or the body. """ parent = proto.Field(proto.STRING, number=1,) endpoint = proto.Field(proto.MESSAGE, number=2, message=gca_endpoint.Endpoint,) + endpoint_id = proto.Field(proto.STRING, number=4,) class CreateEndpointOperationMetadata(proto.Message): diff --git a/google/cloud/aiplatform_v1beta1/types/execution.py b/google/cloud/aiplatform_v1beta1/types/execution.py index 3dd91ffa6b..85b824ac50 100644 --- a/google/cloud/aiplatform_v1beta1/types/execution.py +++ b/google/cloud/aiplatform_v1beta1/types/execution.py @@ -38,7 +38,7 @@ class Execution(proto.Message): The state of this Execution. This is a property of the Execution, and does not imply or capture any ongoing process. 
This property is - managed by clients (such as Vertex Pipelines) + managed by clients (such as Vertex AI Pipelines) and the system does not prescribe or check the validity of state transitions. etag (str): diff --git a/google/cloud/aiplatform_v1beta1/types/explanation.py b/google/cloud/aiplatform_v1beta1/types/explanation.py index 4d55149e34..2972aa2183 100644 --- a/google/cloud/aiplatform_v1beta1/types/explanation.py +++ b/google/cloud/aiplatform_v1beta1/types/explanation.py @@ -33,6 +33,7 @@ "XraiAttribution", "SmoothGradConfig", "FeatureNoiseSigma", + "BlurBaselineConfig", "Similarity", "ExplanationSpecOverride", "ExplanationMetadataOverride", @@ -389,12 +390,22 @@ class IntegratedGradientsAttribution(proto.Message): help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf + blur_baseline_config (google.cloud.aiplatform_v1beta1.types.BlurBaselineConfig): + Config for IG with blur baseline. + When enabled, a linear path from the maximally + blurred image to the input image is created. + Using a blurred baseline instead of zero (black + image) is motivated by the BlurIG approach + explained here: https://arxiv.org/abs/2004.03383 """ step_count = proto.Field(proto.INT32, number=1,) smooth_grad_config = proto.Field( proto.MESSAGE, number=2, message="SmoothGradConfig", ) + blur_baseline_config = proto.Field( + proto.MESSAGE, number=3, message="BlurBaselineConfig", + ) class XraiAttribution(proto.Message): @@ -422,12 +433,22 @@ class XraiAttribution(proto.Message): help improve the computed gradients. Refer to this paper for more details: https://arxiv.org/pdf/1706.03825.pdf + blur_baseline_config (google.cloud.aiplatform_v1beta1.types.BlurBaselineConfig): + Config for XRAI with blur baseline. + When enabled, a linear path from the maximally + blurred image to the input image is created. 
+ Using a blurred baseline instead of zero (black + image) is motivated by the BlurIG approach + explained here: https://arxiv.org/abs/2004.03383 """ step_count = proto.Field(proto.INT32, number=1,) smooth_grad_config = proto.Field( proto.MESSAGE, number=2, message="SmoothGradConfig", ) + blur_baseline_config = proto.Field( + proto.MESSAGE, number=3, message="BlurBaselineConfig", + ) class SmoothGradConfig(proto.Message): @@ -528,6 +549,26 @@ class NoiseSigmaForFeature(proto.Message): ) +class BlurBaselineConfig(proto.Message): + r"""Config for blur baseline. + When enabled, a linear path from the maximally blurred image to + the input image is created. Using a blurred baseline instead of + zero (black image) is motivated by the BlurIG approach explained + here: + https://arxiv.org/abs/2004.03383 + + Attributes: + max_blur_sigma (float): + The standard deviation of the blur kernel for + the blurred baseline. The same blurring + parameter is used for both the height and the + width dimension. If not set, the method defaults + to the zero (i.e. black for images) baseline. + """ + + max_blur_sigma = proto.Field(proto.FLOAT, number=1,) + + class Similarity(proto.Message): r"""Similarity explainability that returns the nearest neighbors from the provided dataset. diff --git a/google/cloud/aiplatform_v1beta1/types/feature.py b/google/cloud/aiplatform_v1beta1/types/feature.py index 1d8ec6500f..7e056694fe 100644 --- a/google/cloud/aiplatform_v1beta1/types/feature.py +++ b/google/cloud/aiplatform_v1beta1/types/feature.py @@ -85,7 +85,7 @@ class Feature(proto.Message): the EntityType's this Feature belongs to. monitoring_stats (Sequence[google.cloud.aiplatform_v1beta1.types.FeatureStatsAnomaly]): Output only. 
A list of historical [Snapshot - Analysis][google.cloud.aiplatform.master.FeaturestoreMonitoringConfig.SnapshotAnalysis] + Analysis][FeaturestoreMonitoringConfig.SnapshotAnalysis] stats requested by user, sorted by [FeatureStatsAnomaly.start_time][google.cloud.aiplatform.v1beta1.FeatureStatsAnomaly.start_time] descending. diff --git a/google/cloud/aiplatform_v1beta1/types/featurestore.py b/google/cloud/aiplatform_v1beta1/types/featurestore.py index a15904b9af..203dfc4a68 100644 --- a/google/cloud/aiplatform_v1beta1/types/featurestore.py +++ b/google/cloud/aiplatform_v1beta1/types/featurestore.py @@ -25,8 +25,9 @@ class Featurestore(proto.Message): - r"""Featurestore configuration information on how the - Featurestore is configured. + r"""Vertex AI Feature Store provides a centralized repository for + organizing, storing, and serving ML features. The Featurestore + is a top-level container for your features and their values. Attributes: name (str): diff --git a/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py b/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py index 12b03262a9..25a42f0bff 100644 --- a/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py +++ b/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py @@ -258,7 +258,10 @@ class Metadata(proto.Message): is provided by user at feature ingestion time. If not, feature store will use the system timestamp when the data is ingested into feature - store. + store. For streaming ingestion, the time, + aligned by days, must be no older than five + years (1825 days) and no later than one year + (366 days) in the future. 
""" generate_time = proto.Field( diff --git a/google/cloud/aiplatform_v1beta1/types/featurestore_service.py b/google/cloud/aiplatform_v1beta1/types/featurestore_service.py index 9076501474..f5c20abdd4 100644 --- a/google/cloud/aiplatform_v1beta1/types/featurestore_service.py +++ b/google/cloud/aiplatform_v1beta1/types/featurestore_service.py @@ -509,13 +509,22 @@ class ExportFeatureValuesRequest(proto.Message): r"""Request message for [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: snapshot_export (google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest.SnapshotExport): - Exports Feature values of all entities of the - EntityType as of a snapshot time. + Exports the latest Feature values of all + entities of the EntityType within a time range. + + This field is a member of `oneof`_ ``mode``. + full_export (google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest.FullExport): + Exports all historical values of all entities + of the EntityType within a time range This field is a member of `oneof`_ ``mode``. entity_type (str): @@ -533,8 +542,8 @@ class ExportFeatureValuesRequest(proto.Message): """ class SnapshotExport(proto.Message): - r"""Describes exporting Feature values as of the snapshot - timestamp. + r"""Describes exporting the latest Feature values of all entities of the + EntityType between [start_time, snapshot_time]. Attributes: snapshot_time (google.protobuf.timestamp_pb2.Timestamp): @@ -542,15 +551,52 @@ class SnapshotExport(proto.Message): If not set, retrieve values as of now. 
Timestamp, if present, must not have higher than millisecond precision. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Excludes Feature values with feature + generation timestamp before this timestamp. If + not set, retrieve oldest values kept in Feature + Store. Timestamp, if present, must not have + higher than millisecond precision. """ snapshot_time = proto.Field( proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp, ) + start_time = proto.Field( + proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, + ) + + class FullExport(proto.Message): + r"""Describes exporting all historical Feature values of all entities of + the EntityType between [start_time, end_time]. + + Attributes: + start_time (google.protobuf.timestamp_pb2.Timestamp): + Excludes Feature values with feature + generation timestamp before this timestamp. If + not set, retrieve oldest values kept in Feature + Store. Timestamp, if present, must not have + higher than millisecond precision. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Exports Feature values as of this timestamp. + If not set, retrieve values as of now. + Timestamp, if present, must not have higher than + millisecond precision. + """ + + start_time = proto.Field( + proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp, + ) snapshot_export = proto.Field( proto.MESSAGE, number=3, oneof="mode", message=SnapshotExport, ) + full_export = proto.Field( + proto.MESSAGE, number=7, oneof="mode", message=FullExport, + ) entity_type = proto.Field(proto.STRING, number=1,) destination = proto.Field( proto.MESSAGE, number=4, message="FeatureValueDestination", @@ -1216,17 +1262,17 @@ class UpdateFeaturestoreOperationMetadata(proto.Message): class ImportFeatureValuesOperationMetadata(proto.Message): - r"""Details of operations that perform import feature values. + r"""Details of operations that perform import Feature values. 
Attributes: generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): Operation metadata for Featurestore import - feature values. + Feature values. imported_entity_count (int): Number of entities that have been imported by the operation. imported_feature_value_count (int): - Number of feature values that have been + Number of Feature values that have been imported by the operation. invalid_row_count (int): The number of rows in input source that weren't imported due diff --git a/google/cloud/aiplatform_v1beta1/types/index_endpoint.py b/google/cloud/aiplatform_v1beta1/types/index_endpoint.py index 0739c547c8..c423957375 100644 --- a/google/cloud/aiplatform_v1beta1/types/index_endpoint.py +++ b/google/cloud/aiplatform_v1beta1/types/index_endpoint.py @@ -73,8 +73,7 @@ class IndexEndpoint(proto.Message): of the original Indexes they are the deployments of. network (str): - Required. Immutable. The full name of the Google Compute - Engine + Optional. The full name of the Google Compute Engine `network `__ to which the IndexEndpoint should be peered. @@ -82,10 +81,25 @@ class IndexEndpoint(proto.Message): network. If left unspecified, the Endpoint is not peered with any network. + Only one of the fields, + [network][google.cloud.aiplatform.v1beta1.IndexEndpoint.network] + or + [enable_private_service_connect][google.cloud.aiplatform.v1beta1.IndexEndpoint.enable_private_service_connect], + can be set. + `Format `__: projects/{project}/global/networks/{network}. Where {project} is a project number, as in '12345', and {network} is network name. + enable_private_service_connect (bool): + Optional. If true, expose the IndexEndpoint via private + service connect. + + Only one of the fields, + [network][google.cloud.aiplatform.v1beta1.IndexEndpoint.network] + or + [enable_private_service_connect][google.cloud.aiplatform.v1beta1.IndexEndpoint.enable_private_service_connect], + can be set. 
""" name = proto.Field(proto.STRING, number=1,) @@ -99,6 +113,7 @@ class IndexEndpoint(proto.Message): create_time = proto.Field(proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,) update_time = proto.Field(proto.MESSAGE, number=8, message=timestamp_pb2.Timestamp,) network = proto.Field(proto.STRING, number=9,) + enable_private_service_connect = proto.Field(proto.BOOL, number=10,) class DeployedIndex(proto.Message): @@ -152,11 +167,10 @@ class DeployedIndex(proto.Message): Optional. A description of resources that the DeployedIndex uses, which to large degree are decided by Vertex AI, and optionally allows only a modest additional configuration. If - min_replica_count is not set, the default value is 1. If + min_replica_count is not set, the default value is 2 (we + don't provide SLA when min_replica_count=1). If max_replica_count is not set, the default value is min_replica_count. The max allowed replica count is 1000. - The user is billed for the resources (at least their minimal - amount) even if the DeployedIndex receives no traffic. enable_access_logging (bool): Optional. If true, private endpoint's access logs are sent to StackDriver Logging. @@ -256,16 +270,24 @@ class AuthProvider(proto.Message): class IndexPrivateEndpoints(proto.Message): - r"""IndexPrivateEndpoints proto is used to provide paths for - users to send requests via private services access. + r"""IndexPrivateEndpoints proto is used to provide paths for users to + send requests via private endpoints (e.g. private service access, + private service connect). To send request via private service + access, use match_grpc_address. To send request via private service + connect, use service_attachment. Attributes: match_grpc_address (str): Output only. The ip address used to send match gRPC requests. + service_attachment (str): + Output only. The name of the service + attachment resource. Populated if private + service connect is enabled. 
""" match_grpc_address = proto.Field(proto.STRING, number=1,) + service_attachment = proto.Field(proto.STRING, number=2,) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py b/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py index 69840b8899..fa8928ca4f 100644 --- a/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py +++ b/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py @@ -36,6 +36,9 @@ "UndeployIndexRequest", "UndeployIndexResponse", "UndeployIndexOperationMetadata", + "MutateDeployedIndexRequest", + "MutateDeployedIndexResponse", + "MutateDeployedIndexOperationMetadata", }, ) @@ -289,4 +292,58 @@ class UndeployIndexOperationMetadata(proto.Message): ) +class MutateDeployedIndexRequest(proto.Message): + r"""Request message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. + + Attributes: + index_endpoint (str): + Required. The name of the IndexEndpoint resource into which + to deploy an Index. Format: + ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` + deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex): + Required. The DeployedIndex to be updated within the + IndexEndpoint. Currently, the updatable fields are + [DeployedIndex][automatic_resources] and + [DeployedIndex][dedicated_resources] + """ + + index_endpoint = proto.Field(proto.STRING, number=1,) + deployed_index = proto.Field( + proto.MESSAGE, number=2, message=gca_index_endpoint.DeployedIndex, + ) + + +class MutateDeployedIndexResponse(proto.Message): + r"""Response message for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. + + Attributes: + deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex): + The DeployedIndex that had been updated in + the IndexEndpoint. 
+ """ + + deployed_index = proto.Field( + proto.MESSAGE, number=1, message=gca_index_endpoint.DeployedIndex, + ) + + +class MutateDeployedIndexOperationMetadata(proto.Message): + r"""Runtime operation information for + [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. + deployed_index_id (str): + The unique index id specified by user + """ + + generic_metadata = proto.Field( + proto.MESSAGE, number=1, message=operation.GenericOperationMetadata, + ) + deployed_index_id = proto.Field(proto.STRING, number=2,) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/job_service.py b/google/cloud/aiplatform_v1beta1/types/job_service.py index 4d34ca0100..0988a81f3c 100644 --- a/google/cloud/aiplatform_v1beta1/types/job_service.py +++ b/google/cloud/aiplatform_v1beta1/types/job_service.py @@ -633,7 +633,7 @@ class SearchModelDeploymentMonitoringStatsAnomaliesRequest(proto.Message): \`projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job} deployed_model_id (str): Required. The DeployedModel ID of the - [google.cloud.aiplatform.master.ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. + [ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. feature_display_name (str): The feature display name. If specified, only return the stats belonging to this feature. Format: diff --git a/google/cloud/aiplatform_v1beta1/types/metadata_schema.py b/google/cloud/aiplatform_v1beta1/types/metadata_schema.py index 34cca83420..69642111fb 100644 --- a/google/cloud/aiplatform_v1beta1/types/metadata_schema.py +++ b/google/cloud/aiplatform_v1beta1/types/metadata_schema.py @@ -34,7 +34,8 @@ class MetadataSchema(proto.Message): The version of the MetadataSchema. 
The version's format must match the following regular expression: ``^[0-9]+[.][0-9]+[.][0-9]+$``, which would allow to - order/compare different versions.Example: 1.0.0, 1.0.1, etc. + order/compare different versions. Example: 1.0.0, 1.0.1, + etc. schema (str): Required. The raw YAML string representation of the MetadataSchema. The combination of [MetadataSchema.version] diff --git a/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py b/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py index 5bf590b7c1..b9a106c639 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py +++ b/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py @@ -162,9 +162,10 @@ class ModelDeploymentMonitoringJob(proto.Message): resources of this ModelDeploymentMonitoringJob will be secured by this key. enable_monitoring_pipeline_logs (bool): - If true, the scheduled monitoring pipeline status logs are - sent to Google Cloud Logging. Please note the logs incur - cost, which are subject to `Cloud Logging + If true, the scheduled monitoring pipeline logs are sent to + Google Cloud Logging, including pipeline status and + anomalies detected. Please note the logs incur cost, which + are subject to `Cloud Logging pricing `__. error (google.rpc.status_pb2.Status): Output only. Only populated when the job's state is diff --git a/google/cloud/aiplatform_v1beta1/types/model_monitoring.py b/google/cloud/aiplatform_v1beta1/types/model_monitoring.py index 05937fd16b..760ac63c2f 100644 --- a/google/cloud/aiplatform_v1beta1/types/model_monitoring.py +++ b/google/cloud/aiplatform_v1beta1/types/model_monitoring.py @@ -44,8 +44,8 @@ class ModelMonitoringObjectiveConfig(proto.Message): prediction_drift_detection_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig): The config for drift of prediction data. 
explanation_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.ExplanationConfig): - The config for integrated with Explainable - AI. + The config for integrating with Vertex + Explainable AI. """ class TrainingDataset(proto.Message): @@ -160,14 +160,14 @@ class PredictionDriftDetectionConfig(proto.Message): ) class ExplanationConfig(proto.Message): - r"""The config for integrated with Explainable AI. Only applicable if - the Model has explanation_spec populated. + r"""The config for integrating with Vertex Explainable AI. Only + applicable if the Model has explanation_spec populated. Attributes: enable_feature_attributes (bool): - If want to analyze the Explainable AI feature - attribute scores or not. If set to true, Vertex - AI will log the feature attributions from + If want to analyze the Vertex Explainable AI + feature attribute scores or not. If set to true, + Vertex AI will log the feature attributions from explain response and do the skew/drift detection for them. explanation_baseline (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.ExplanationConfig.ExplanationBaseline): @@ -246,7 +246,7 @@ class PredictionFormat(proto.Enum): class ModelMonitoringAlertConfig(proto.Message): - r"""Next ID: 2 + r"""Next ID: 3 .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -255,6 +255,12 @@ class ModelMonitoringAlertConfig(proto.Message): Email alert config. This field is a member of `oneof`_ ``alert``. + enable_logging (bool): + Dump the anomalies to Cloud Logging. The anomalies will be + put to json payload encoded from proto + [google.cloud.aiplatform.logging.ModelMonitoringAnomaliesLogEntry][]. + This can be further sinked to Pub/Sub or any other services + supported by Cloud Logging. 
""" class EmailAlertConfig(proto.Message): @@ -270,6 +276,7 @@ class EmailAlertConfig(proto.Message): email_alert_config = proto.Field( proto.MESSAGE, number=1, oneof="alert", message=EmailAlertConfig, ) + enable_logging = proto.Field(proto.BOOL, number=2,) class ThresholdConfig(proto.Message): diff --git a/google/cloud/aiplatform_v1beta1/types/pipeline_job.py b/google/cloud/aiplatform_v1beta1/types/pipeline_job.py index 58e8dce723..c9c7d6d861 100644 --- a/google/cloud/aiplatform_v1beta1/types/pipeline_job.py +++ b/google/cloud/aiplatform_v1beta1/types/pipeline_job.py @@ -116,11 +116,16 @@ class RuntimeConfig(proto.Message): Attributes: parameters (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineJob.RuntimeConfig.ParametersEntry]): - Deprecated. Use [RuntimeConfig.parameter_values] instead. - The runtime parameters of the PipelineJob. The parameters - will be passed into + Deprecated. Use + [RuntimeConfig.parameter_values][google.cloud.aiplatform.v1beta1.PipelineJob.RuntimeConfig.parameter_values] + instead. The runtime parameters of the PipelineJob. The + parameters will be passed into [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec] - to replace the placeholders at runtime. + to replace the placeholders at runtime. This field is used + by pipelines built using + ``PipelineJob.pipeline_spec.schema_version`` 2.0.0 or lower, + such as pipelines built using Kubeflow Pipelines SDK 1.8 or + lower. gcs_output_directory (str): Required. A path in a Cloud Storage bucket, which will be treated as the root output directory of the pipeline. It is @@ -134,7 +139,11 @@ class RuntimeConfig(proto.Message): The runtime parameters of the PipelineJob. The parameters will be passed into [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec] - to replace the placeholders at runtime. + to replace the placeholders at runtime. 
This field is used + by pipelines built using + ``PipelineJob.pipeline_spec.schema_version`` 2.1.0, such as + pipelines built using Kubeflow Pipelines SDK 1.9 or higher + and the v2 DSL. """ parameters = proto.MapField( diff --git a/google/cloud/aiplatform_v1beta1/types/pipeline_service.py b/google/cloud/aiplatform_v1beta1/types/pipeline_service.py index e2dc3139b6..902bee0ba8 100644 --- a/google/cloud/aiplatform_v1beta1/types/pipeline_service.py +++ b/google/cloud/aiplatform_v1beta1/types/pipeline_service.py @@ -229,6 +229,7 @@ class ListPipelineJobsRequest(proto.Message): comparisons, and ``:`` wildcard. for example, can check if pipeline's display_name contains *step* by doing display_name:"*step*" + - ``state``: Supports ``=`` and ``!=`` comparisons. - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, ``<=``, and ``>=`` comparisons. Values must be in RFC 3339 format. @@ -278,6 +279,7 @@ class ListPipelineJobsRequest(proto.Message): - ``create_time`` - ``update_time`` - ``end_time`` + - ``start_time`` """ parent = proto.Field(proto.STRING, number=1,) diff --git a/google/cloud/aiplatform_v1beta1/types/prediction_service.py b/google/cloud/aiplatform_v1beta1/types/prediction_service.py index 4b258f6fa6..45c6ac3206 100644 --- a/google/cloud/aiplatform_v1beta1/types/prediction_service.py +++ b/google/cloud/aiplatform_v1beta1/types/prediction_service.py @@ -83,14 +83,14 @@ class PredictResponse(proto.Message): ID of the Endpoint's DeployedModel that served this prediction. model (str): - Output only. The name of the Model this - DeployedModel, that served this prediction, was - created from. + Output only. The resource name of the Model + which is deployed as the DeployedModel that this + prediction hits. model_display_name (str): Output only. The [display name][google.cloud.aiplatform.v1beta1.Model.display_name] of - the Model this DeployedModel, that served this prediction, - was created from. 
+ the Model which is deployed as the DeployedModel that this + prediction hits. """ predictions = proto.RepeatedField( diff --git a/google/cloud/aiplatform_v1beta1/types/study.py b/google/cloud/aiplatform_v1beta1/types/study.py index 7c46137bee..77032803f9 100644 --- a/google/cloud/aiplatform_v1beta1/types/study.py +++ b/google/cloud/aiplatform_v1beta1/types/study.py @@ -27,7 +27,8 @@ class Study(proto.Message): - r"""A message representing a Study. + r"""LINT.IfChange + A message representing a Study. Attributes: name (str): @@ -97,13 +98,14 @@ class Trial(proto.Message): client_id (str): Output only. The identifier of the client that originally requested this Trial. Each client is identified by a unique - client_id. When a client asks for a suggestion, Vizier will - assign it a Trial. The client should evaluate the Trial, - complete it, and report back to Vizier. If suggestion is - asked again by same client_id before the Trial is completed, - the same Trial will be returned. Multiple clients with - different client_ids can ask for suggestions simultaneously, - each of them will get their own Trial. + client_id. When a client asks for a suggestion, Vertex AI + Vizier will assign it a Trial. The client should evaluate + the Trial, complete it, and report back to Vertex AI Vizier. + If suggestion is asked again by same client_id before the + Trial is completed, the same Trial will be returned. + Multiple clients with different client_ids can ask for + suggestions simultaneously, each of them will get their own + Trial. infeasible_reason (str): Output only. A human readable string describing why the Trial is infeasible. This is set only if Trial state is @@ -208,9 +210,9 @@ class StudySpec(proto.Message): The search algorithm specified for the Study. observation_noise (google.cloud.aiplatform_v1beta1.types.StudySpec.ObservationNoise): The observation noise level of the study. - Currently only supported by the Vizier service. 
- Not supported by HyperparamterTuningJob or - TrainingPipeline. + Currently only supported by the Vertex AI Vizier + service. Not supported by HyperparamterTuningJob + or TrainingPipeline. measurement_selection_type (google.cloud.aiplatform_v1beta1.types.StudySpec.MeasurementSelectionType): Describe which measurement selection type will be used @@ -335,8 +337,8 @@ class DoubleValueSpec(proto.Message): to be a relatively good starting point. Unset value signals that there is no offered starting point. - Currently only supported by the Vizier service. Not - supported by HyperparamterTuningJob or TrainingPipeline. + Currently only supported by the Vertex AI Vizier service. + Not supported by HyperparamterTuningJob or TrainingPipeline. This field is a member of `oneof`_ ``_default_value``. """ @@ -360,8 +362,8 @@ class IntegerValueSpec(proto.Message): to be a relatively good starting point. Unset value signals that there is no offered starting point. - Currently only supported by the Vizier service. Not - supported by HyperparamterTuningJob or TrainingPipeline. + Currently only supported by the Vertex AI Vizier service. + Not supported by HyperparamterTuningJob or TrainingPipeline. This field is a member of `oneof`_ ``_default_value``. """ diff --git a/google/cloud/aiplatform_v1beta1/types/training_pipeline.py b/google/cloud/aiplatform_v1beta1/types/training_pipeline.py index 4ee0f8265c..6b9ee8c4dc 100644 --- a/google/cloud/aiplatform_v1beta1/types/training_pipeline.py +++ b/google/cloud/aiplatform_v1beta1/types/training_pipeline.py @@ -33,6 +33,7 @@ "FilterSplit", "PredefinedSplit", "TimestampSplit", + "StratifiedSplit", }, ) @@ -207,6 +208,12 @@ class InputDataConfig(proto.Message): Split based on the timestamp of the input data pieces. + This field is a member of `oneof`_ ``split``. + stratified_split (google.cloud.aiplatform_v1beta1.types.StratifiedSplit): + Supported only for tabular Datasets. + Split based on the distribution of the specified + column. 
+ This field is a member of `oneof`_ ``split``. gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination): The Cloud Storage location where the training data is to be @@ -323,6 +330,9 @@ class InputDataConfig(proto.Message): timestamp_split = proto.Field( proto.MESSAGE, number=5, oneof="split", message="TimestampSplit", ) + stratified_split = proto.Field( + proto.MESSAGE, number=12, oneof="split", message="StratifiedSplit", + ) gcs_destination = proto.Field( proto.MESSAGE, number=8, oneof="destination", message=io.GcsDestination, ) @@ -457,4 +467,45 @@ class TimestampSplit(proto.Message): key = proto.Field(proto.STRING, number=4,) +class StratifiedSplit(proto.Message): + r"""Assigns input data to the training, validation, and test sets so + that the distribution of values found in the categorical column (as + specified by the ``key`` field) is mirrored within each split. The + fraction values determine the relative sizes of the splits. + + For example, if the specified column has three values, with 50% of + the rows having value "A", 25% value "B", and 25% value "C", and the + split fractions are specified as 80/10/10, then the training set + will constitute 80% of the training data, with about 50% of the + training set rows having the value "A" for the specified column, + about 25% having the value "B", and about 25% having the value "C". + + Only the top 500 occurring values are used; any values not in the + top 500 values are randomly assigned to a split. If less than three + rows contain a specific value, those rows are randomly assigned. + + Supported only for tabular Datasets. + + Attributes: + training_fraction (float): + The fraction of the input data that is to be + used to train the Model. + validation_fraction (float): + The fraction of the input data that is to be + used to validate the Model. + test_fraction (float): + The fraction of the input data that is to be + used to evaluate the Model. + key (str): + Required. 
The key is a name of one of the + Dataset's data columns. The key provided must be + for a categorical column. + """ + + training_fraction = proto.Field(proto.DOUBLE, number=1,) + validation_fraction = proto.Field(proto.DOUBLE, number=2,) + test_fraction = proto.Field(proto.DOUBLE, number=3,) + key = proto.Field(proto.STRING, number=4,) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/unmanaged_container_model.py b/google/cloud/aiplatform_v1beta1/types/unmanaged_container_model.py similarity index 81% rename from owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/unmanaged_container_model.py rename to google/cloud/aiplatform_v1beta1/types/unmanaged_container_model.py index a1c4174ed2..42ecf09fae 100644 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/unmanaged_container_model.py +++ b/google/cloud/aiplatform_v1beta1/types/unmanaged_container_model.py @@ -19,10 +19,7 @@ __protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'UnmanagedContainerModel', - }, + package="google.cloud.aiplatform.v1beta1", manifest={"UnmanagedContainerModel",}, ) @@ -43,19 +40,12 @@ class UnmanagedContainerModel(proto.Message): Model. 
""" - artifact_uri = proto.Field( - proto.STRING, - number=1, - ) + artifact_uri = proto.Field(proto.STRING, number=1,) predict_schemata = proto.Field( - proto.MESSAGE, - number=2, - message=model.PredictSchemata, + proto.MESSAGE, number=2, message=model.PredictSchemata, ) container_spec = proto.Field( - proto.MESSAGE, - number=3, - message=model.ModelContainerSpec, + proto.MESSAGE, number=3, message=model.ModelContainerSpec, ) diff --git a/owl-bot-staging/v1/.coveragerc b/owl-bot-staging/v1/.coveragerc deleted file mode 100644 index 01d28d4b2c..0000000000 --- a/owl-bot-staging/v1/.coveragerc +++ /dev/null @@ -1,17 +0,0 @@ -[run] -branch = True - -[report] -show_missing = True -omit = - google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py -exclude_lines = - # Re-enable the standard pragma - pragma: NO COVER - # Ignore debug-only repr - def __repr__ - # Ignore pkg_resources exceptions. - # This is added at the module level as a safeguard for if someone - # generates the code and tries to run it without pip installing. This - # makes it virtually impossible to test properly. - except pkg_resources.DistributionNotFound diff --git a/owl-bot-staging/v1/MANIFEST.in b/owl-bot-staging/v1/MANIFEST.in deleted file mode 100644 index d55f1f202e..0000000000 --- a/owl-bot-staging/v1/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -recursive-include google/cloud/aiplatform/v1/schema/trainingjob/definition *.py -recursive-include google/cloud/aiplatform/v1/schema/trainingjob/definition_v1 *.py diff --git a/owl-bot-staging/v1/README.rst b/owl-bot-staging/v1/README.rst deleted file mode 100644 index ad49c55e02..0000000000 --- a/owl-bot-staging/v1/README.rst +++ /dev/null @@ -1,49 +0,0 @@ -Python Client for Google Cloud Aiplatform V1 Schema Trainingjob Definition API -================================================= - -Quick Start ------------ - -In order to use this library, you first need to go through the following steps: - -1. 
`Select or create a Cloud Platform project.`_ -2. `Enable billing for your project.`_ -3. Enable the Google Cloud Aiplatform V1 Schema Trainingjob Definition API. -4. `Setup Authentication.`_ - -.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project -.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project -.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html - -Installation -~~~~~~~~~~~~ - -Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to -create isolated Python environments. The basic problem it addresses is one of -dependencies and versions, and indirectly permissions. - -With `virtualenv`_, it's possible to install this library without needing system -install permissions, and without clashing with the installed system -dependencies. - -.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ - - -Mac/Linux -^^^^^^^^^ - -.. code-block:: console - - python3 -m venv - source /bin/activate - /bin/pip install /path/to/library - - -Windows -^^^^^^^ - -.. code-block:: console - - python3 -m venv - \Scripts\activate - \Scripts\pip.exe install \path\to\library diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/dataset_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/dataset_service.rst deleted file mode 100644 index 79ddc4623f..0000000000 --- a/owl-bot-staging/v1/docs/aiplatform_v1/dataset_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -DatasetService --------------------------------- - -.. automodule:: google.cloud.aiplatform_v1.services.dataset_service - :members: - :inherited-members: - -.. 
automodule:: google.cloud.aiplatform_v1.services.dataset_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/endpoint_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/endpoint_service.rst deleted file mode 100644 index 3b900f851e..0000000000 --- a/owl-bot-staging/v1/docs/aiplatform_v1/endpoint_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -EndpointService ---------------------------------- - -.. automodule:: google.cloud.aiplatform_v1.services.endpoint_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1.services.endpoint_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/featurestore_online_serving_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/featurestore_online_serving_service.rst deleted file mode 100644 index ace5b9dd1a..0000000000 --- a/owl-bot-staging/v1/docs/aiplatform_v1/featurestore_online_serving_service.rst +++ /dev/null @@ -1,6 +0,0 @@ -FeaturestoreOnlineServingService --------------------------------------------------- - -.. automodule:: google.cloud.aiplatform_v1.services.featurestore_online_serving_service - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/featurestore_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/featurestore_service.rst deleted file mode 100644 index 90a303a4c4..0000000000 --- a/owl-bot-staging/v1/docs/aiplatform_v1/featurestore_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -FeaturestoreService -------------------------------------- - -.. automodule:: google.cloud.aiplatform_v1.services.featurestore_service - :members: - :inherited-members: - -.. 
automodule:: google.cloud.aiplatform_v1.services.featurestore_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/index_endpoint_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/index_endpoint_service.rst deleted file mode 100644 index 9a87b81082..0000000000 --- a/owl-bot-staging/v1/docs/aiplatform_v1/index_endpoint_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -IndexEndpointService --------------------------------------- - -.. automodule:: google.cloud.aiplatform_v1.services.index_endpoint_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1.services.index_endpoint_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/index_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/index_service.rst deleted file mode 100644 index b07b444c23..0000000000 --- a/owl-bot-staging/v1/docs/aiplatform_v1/index_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -IndexService ------------------------------- - -.. automodule:: google.cloud.aiplatform_v1.services.index_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1.services.index_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/job_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/job_service.rst deleted file mode 100644 index 6afcbbb4d0..0000000000 --- a/owl-bot-staging/v1/docs/aiplatform_v1/job_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -JobService ----------------------------- - -.. automodule:: google.cloud.aiplatform_v1.services.job_service - :members: - :inherited-members: - -.. 
automodule:: google.cloud.aiplatform_v1.services.job_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/metadata_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/metadata_service.rst deleted file mode 100644 index 419fd0a850..0000000000 --- a/owl-bot-staging/v1/docs/aiplatform_v1/metadata_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -MetadataService ---------------------------------- - -.. automodule:: google.cloud.aiplatform_v1.services.metadata_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1.services.metadata_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/migration_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/migration_service.rst deleted file mode 100644 index ac0a5fb3aa..0000000000 --- a/owl-bot-staging/v1/docs/aiplatform_v1/migration_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -MigrationService ----------------------------------- - -.. automodule:: google.cloud.aiplatform_v1.services.migration_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1.services.migration_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/model_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/model_service.rst deleted file mode 100644 index 8baab43cbc..0000000000 --- a/owl-bot-staging/v1/docs/aiplatform_v1/model_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -ModelService ------------------------------- - -.. automodule:: google.cloud.aiplatform_v1.services.model_service - :members: - :inherited-members: - -.. 
automodule:: google.cloud.aiplatform_v1.services.model_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/pipeline_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/pipeline_service.rst deleted file mode 100644 index bbf6b32092..0000000000 --- a/owl-bot-staging/v1/docs/aiplatform_v1/pipeline_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -PipelineService ---------------------------------- - -.. automodule:: google.cloud.aiplatform_v1.services.pipeline_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1.services.pipeline_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/prediction_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/prediction_service.rst deleted file mode 100644 index fdda504879..0000000000 --- a/owl-bot-staging/v1/docs/aiplatform_v1/prediction_service.rst +++ /dev/null @@ -1,6 +0,0 @@ -PredictionService ------------------------------------ - -.. automodule:: google.cloud.aiplatform_v1.services.prediction_service - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/services.rst b/owl-bot-staging/v1/docs/aiplatform_v1/services.rst deleted file mode 100644 index 0a6443a972..0000000000 --- a/owl-bot-staging/v1/docs/aiplatform_v1/services.rst +++ /dev/null @@ -1,20 +0,0 @@ -Services for Google Cloud Aiplatform v1 API -=========================================== -.. 
toctree:: - :maxdepth: 2 - - dataset_service - endpoint_service - featurestore_online_serving_service - featurestore_service - index_endpoint_service - index_service - job_service - metadata_service - migration_service - model_service - pipeline_service - prediction_service - specialist_pool_service - tensorboard_service - vizier_service diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/specialist_pool_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/specialist_pool_service.rst deleted file mode 100644 index 4a6f288894..0000000000 --- a/owl-bot-staging/v1/docs/aiplatform_v1/specialist_pool_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -SpecialistPoolService ---------------------------------------- - -.. automodule:: google.cloud.aiplatform_v1.services.specialist_pool_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1.services.specialist_pool_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/tensorboard_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/tensorboard_service.rst deleted file mode 100644 index 0fa17e10b8..0000000000 --- a/owl-bot-staging/v1/docs/aiplatform_v1/tensorboard_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -TensorboardService ------------------------------------- - -.. automodule:: google.cloud.aiplatform_v1.services.tensorboard_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1.services.tensorboard_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/types.rst b/owl-bot-staging/v1/docs/aiplatform_v1/types.rst deleted file mode 100644 index ad4454843f..0000000000 --- a/owl-bot-staging/v1/docs/aiplatform_v1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Aiplatform v1 API -======================================== - -.. 
automodule:: google.cloud.aiplatform_v1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1/docs/aiplatform_v1/vizier_service.rst b/owl-bot-staging/v1/docs/aiplatform_v1/vizier_service.rst deleted file mode 100644 index efdbafe3c8..0000000000 --- a/owl-bot-staging/v1/docs/aiplatform_v1/vizier_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -VizierService -------------------------------- - -.. automodule:: google.cloud.aiplatform_v1.services.vizier_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1.services.vizier_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/conf.py b/owl-bot-staging/v1/docs/conf.py deleted file mode 100644 index 4e03a7096d..0000000000 --- a/owl-bot-staging/v1/docs/conf.py +++ /dev/null @@ -1,376 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# -# google-cloud-aiplatform-v1-schema-trainingjob-definition documentation build configuration file -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. 
- -import sys -import os -import shlex - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath("..")) - -__version__ = "0.1.0" - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "1.6.3" - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.autosummary", - "sphinx.ext.intersphinx", - "sphinx.ext.coverage", - "sphinx.ext.napoleon", - "sphinx.ext.todo", - "sphinx.ext.viewcode", -] - -# autodoc/autosummary flags -autoclass_content = "both" -autodoc_default_flags = ["members"] -autosummary_generate = True - - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# Allow markdown includes (so releases.md can include CHANGLEOG.md) -# http://www.sphinx-doc.org/en/master/markdown.html -source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -source_suffix = [".rst", ".md"] - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = "index" - -# General information about the project. -project = u"google-cloud-aiplatform-v1-schema-trainingjob-definition" -copyright = u"2020, Google, LLC" -author = u"Google APIs" # TODO: autogenerate this bit - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The full version, including alpha/beta/rc tags. 
-release = __version__ -# The short X.Y version. -version = ".".join(release.split(".")[0:2]) - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ["_build"] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = True - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = "alabaster" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. 
For a list of options available for each theme, see the -# documentation. -html_theme_options = { - "description": "Google Cloud Aiplatform V1 Schema Trainingjob Client Libraries for Python", - "github_user": "googleapis", - "github_repo": "google-cloud-python", - "github_banner": True, - "font_family": "'Roboto', Georgia, sans", - "head_font_family": "'Roboto', Georgia, serif", - "code_font_family": "'Roboto Mono', 'Consolas', monospace", -} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. 
-# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# Now only 'ja' uses this config value -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. 
-htmlhelp_basename = "google-cloud-aiplatform-v1-schema-trainingjob-definition-doc" - -# -- Options for warnings ------------------------------------------------------ - - -suppress_warnings = [ - # Temporarily suppress this to avoid "more than one target found for - # cross-reference" warning, which are intractable for us to avoid while in - # a mono-repo. - # See https://github.com/sphinx-doc/sphinx/blob - # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 - "ref.python" -] - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - # 'preamble': '', - # Latex figure (float) alignment - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ( - master_doc, - "google-cloud-aiplatform-v1-schema-trainingjob-definition.tex", - u"google-cloud-aiplatform-v1-schema-trainingjob-definition Documentation", - author, - "manual", - ) -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. 
List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ( - master_doc, - "google-cloud-aiplatform-v1-schema-trainingjob-definition", - u"Google Cloud Aiplatform V1 Schema Trainingjob Definition Documentation", - [author], - 1, - ) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - master_doc, - "google-cloud-aiplatform-v1-schema-trainingjob-definition", - u"google-cloud-aiplatform-v1-schema-trainingjob-definition Documentation", - author, - "google-cloud-aiplatform-v1-schema-trainingjob-definition", - "GAPIC library for Google Cloud Aiplatform V1 Schema Trainingjob Definition API", - "APIs", - ) -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - - -# Example configuration for intersphinx: refer to the Python standard library. 
-intersphinx_mapping = { - "python": ("http://python.readthedocs.org/en/latest/", None), - "gax": ("https://gax-python.readthedocs.org/en/latest/", None), - "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), - "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("http://requests.kennethreitz.org/en/stable/", None), - "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), - "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), -} - - -# Napoleon settings -napoleon_google_docstring = True -napoleon_numpy_docstring = True -napoleon_include_private_with_doc = False -napoleon_include_special_with_doc = True -napoleon_use_admonition_for_examples = False -napoleon_use_admonition_for_notes = False -napoleon_use_admonition_for_references = False -napoleon_use_ivar = False -napoleon_use_param = True -napoleon_use_rtype = True diff --git a/owl-bot-staging/v1/docs/definition_v1/services.rst b/owl-bot-staging/v1/docs/definition_v1/services.rst deleted file mode 100644 index ba6b1940e8..0000000000 --- a/owl-bot-staging/v1/docs/definition_v1/services.rst +++ /dev/null @@ -1,4 +0,0 @@ -Services for Google Cloud Aiplatform V1 Schema Trainingjob Definition v1 API -============================================================================ -.. toctree:: - :maxdepth: 2 diff --git a/owl-bot-staging/v1/docs/definition_v1/types.rst b/owl-bot-staging/v1/docs/definition_v1/types.rst deleted file mode 100644 index a1df2bce25..0000000000 --- a/owl-bot-staging/v1/docs/definition_v1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Aiplatform V1 Schema Trainingjob Definition v1 API -========================================================================= - -.. 
automodule:: google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1/docs/index.rst b/owl-bot-staging/v1/docs/index.rst deleted file mode 100644 index ad6ae57609..0000000000 --- a/owl-bot-staging/v1/docs/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -API Reference -------------- -.. toctree:: - :maxdepth: 2 - - definition_v1/services - definition_v1/types diff --git a/owl-bot-staging/v1/docs/instance_v1/services.rst b/owl-bot-staging/v1/docs/instance_v1/services.rst deleted file mode 100644 index 50c011c69a..0000000000 --- a/owl-bot-staging/v1/docs/instance_v1/services.rst +++ /dev/null @@ -1,4 +0,0 @@ -Services for Google Cloud Aiplatform V1 Schema Predict Instance v1 API -====================================================================== -.. toctree:: - :maxdepth: 2 diff --git a/owl-bot-staging/v1/docs/instance_v1/types.rst b/owl-bot-staging/v1/docs/instance_v1/types.rst deleted file mode 100644 index 564ab013ee..0000000000 --- a/owl-bot-staging/v1/docs/instance_v1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Aiplatform V1 Schema Predict Instance v1 API -=================================================================== - -.. automodule:: google.cloud.aiplatform.v1.schema.predict.instance_v1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1/docs/params_v1/services.rst b/owl-bot-staging/v1/docs/params_v1/services.rst deleted file mode 100644 index bf08ea6e98..0000000000 --- a/owl-bot-staging/v1/docs/params_v1/services.rst +++ /dev/null @@ -1,4 +0,0 @@ -Services for Google Cloud Aiplatform V1 Schema Predict Params v1 API -==================================================================== -.. 
toctree:: - :maxdepth: 2 diff --git a/owl-bot-staging/v1/docs/params_v1/types.rst b/owl-bot-staging/v1/docs/params_v1/types.rst deleted file mode 100644 index 956ef5224d..0000000000 --- a/owl-bot-staging/v1/docs/params_v1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Aiplatform V1 Schema Predict Params v1 API -================================================================= - -.. automodule:: google.cloud.aiplatform.v1.schema.predict.params_v1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1/docs/prediction_v1/services.rst b/owl-bot-staging/v1/docs/prediction_v1/services.rst deleted file mode 100644 index ad6f034387..0000000000 --- a/owl-bot-staging/v1/docs/prediction_v1/services.rst +++ /dev/null @@ -1,4 +0,0 @@ -Services for Google Cloud Aiplatform V1 Schema Predict Prediction v1 API -======================================================================== -.. toctree:: - :maxdepth: 2 diff --git a/owl-bot-staging/v1/docs/prediction_v1/types.rst b/owl-bot-staging/v1/docs/prediction_v1/types.rst deleted file mode 100644 index a97faf34de..0000000000 --- a/owl-bot-staging/v1/docs/prediction_v1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Aiplatform V1 Schema Predict Prediction v1 API -===================================================================== - -.. automodule:: google.cloud.aiplatform.v1.schema.predict.prediction_v1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/__init__.py deleted file mode 100644 index efde2cc264..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/__init__.py +++ /dev/null @@ -1,927 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from google.cloud.aiplatform_v1.services.dataset_service.client import DatasetServiceClient -from google.cloud.aiplatform_v1.services.dataset_service.async_client import DatasetServiceAsyncClient -from google.cloud.aiplatform_v1.services.endpoint_service.client import EndpointServiceClient -from google.cloud.aiplatform_v1.services.endpoint_service.async_client import EndpointServiceAsyncClient -from google.cloud.aiplatform_v1.services.featurestore_online_serving_service.client import FeaturestoreOnlineServingServiceClient -from google.cloud.aiplatform_v1.services.featurestore_online_serving_service.async_client import FeaturestoreOnlineServingServiceAsyncClient -from google.cloud.aiplatform_v1.services.featurestore_service.client import FeaturestoreServiceClient -from google.cloud.aiplatform_v1.services.featurestore_service.async_client import FeaturestoreServiceAsyncClient -from google.cloud.aiplatform_v1.services.index_endpoint_service.client import IndexEndpointServiceClient -from google.cloud.aiplatform_v1.services.index_endpoint_service.async_client import IndexEndpointServiceAsyncClient -from google.cloud.aiplatform_v1.services.index_service.client import IndexServiceClient -from google.cloud.aiplatform_v1.services.index_service.async_client import IndexServiceAsyncClient -from google.cloud.aiplatform_v1.services.job_service.client import JobServiceClient -from google.cloud.aiplatform_v1.services.job_service.async_client import JobServiceAsyncClient -from google.cloud.aiplatform_v1.services.metadata_service.client import MetadataServiceClient 
-from google.cloud.aiplatform_v1.services.metadata_service.async_client import MetadataServiceAsyncClient -from google.cloud.aiplatform_v1.services.migration_service.client import MigrationServiceClient -from google.cloud.aiplatform_v1.services.migration_service.async_client import MigrationServiceAsyncClient -from google.cloud.aiplatform_v1.services.model_service.client import ModelServiceClient -from google.cloud.aiplatform_v1.services.model_service.async_client import ModelServiceAsyncClient -from google.cloud.aiplatform_v1.services.pipeline_service.client import PipelineServiceClient -from google.cloud.aiplatform_v1.services.pipeline_service.async_client import PipelineServiceAsyncClient -from google.cloud.aiplatform_v1.services.prediction_service.client import PredictionServiceClient -from google.cloud.aiplatform_v1.services.prediction_service.async_client import PredictionServiceAsyncClient -from google.cloud.aiplatform_v1.services.specialist_pool_service.client import SpecialistPoolServiceClient -from google.cloud.aiplatform_v1.services.specialist_pool_service.async_client import SpecialistPoolServiceAsyncClient -from google.cloud.aiplatform_v1.services.tensorboard_service.client import TensorboardServiceClient -from google.cloud.aiplatform_v1.services.tensorboard_service.async_client import TensorboardServiceAsyncClient -from google.cloud.aiplatform_v1.services.vizier_service.client import VizierServiceClient -from google.cloud.aiplatform_v1.services.vizier_service.async_client import VizierServiceAsyncClient - -from google.cloud.aiplatform_v1.types.accelerator_type import AcceleratorType -from google.cloud.aiplatform_v1.types.annotation import Annotation -from google.cloud.aiplatform_v1.types.annotation_spec import AnnotationSpec -from google.cloud.aiplatform_v1.types.artifact import Artifact -from google.cloud.aiplatform_v1.types.batch_prediction_job import BatchPredictionJob -from google.cloud.aiplatform_v1.types.completion_stats import CompletionStats 
-from google.cloud.aiplatform_v1.types.context import Context -from google.cloud.aiplatform_v1.types.custom_job import ContainerSpec -from google.cloud.aiplatform_v1.types.custom_job import CustomJob -from google.cloud.aiplatform_v1.types.custom_job import CustomJobSpec -from google.cloud.aiplatform_v1.types.custom_job import PythonPackageSpec -from google.cloud.aiplatform_v1.types.custom_job import Scheduling -from google.cloud.aiplatform_v1.types.custom_job import WorkerPoolSpec -from google.cloud.aiplatform_v1.types.data_item import DataItem -from google.cloud.aiplatform_v1.types.data_labeling_job import ActiveLearningConfig -from google.cloud.aiplatform_v1.types.data_labeling_job import DataLabelingJob -from google.cloud.aiplatform_v1.types.data_labeling_job import SampleConfig -from google.cloud.aiplatform_v1.types.data_labeling_job import TrainingConfig -from google.cloud.aiplatform_v1.types.dataset import Dataset -from google.cloud.aiplatform_v1.types.dataset import ExportDataConfig -from google.cloud.aiplatform_v1.types.dataset import ImportDataConfig -from google.cloud.aiplatform_v1.types.dataset_service import CreateDatasetOperationMetadata -from google.cloud.aiplatform_v1.types.dataset_service import CreateDatasetRequest -from google.cloud.aiplatform_v1.types.dataset_service import DeleteDatasetRequest -from google.cloud.aiplatform_v1.types.dataset_service import ExportDataOperationMetadata -from google.cloud.aiplatform_v1.types.dataset_service import ExportDataRequest -from google.cloud.aiplatform_v1.types.dataset_service import ExportDataResponse -from google.cloud.aiplatform_v1.types.dataset_service import GetAnnotationSpecRequest -from google.cloud.aiplatform_v1.types.dataset_service import GetDatasetRequest -from google.cloud.aiplatform_v1.types.dataset_service import ImportDataOperationMetadata -from google.cloud.aiplatform_v1.types.dataset_service import ImportDataRequest -from google.cloud.aiplatform_v1.types.dataset_service import 
ImportDataResponse -from google.cloud.aiplatform_v1.types.dataset_service import ListAnnotationsRequest -from google.cloud.aiplatform_v1.types.dataset_service import ListAnnotationsResponse -from google.cloud.aiplatform_v1.types.dataset_service import ListDataItemsRequest -from google.cloud.aiplatform_v1.types.dataset_service import ListDataItemsResponse -from google.cloud.aiplatform_v1.types.dataset_service import ListDatasetsRequest -from google.cloud.aiplatform_v1.types.dataset_service import ListDatasetsResponse -from google.cloud.aiplatform_v1.types.dataset_service import UpdateDatasetRequest -from google.cloud.aiplatform_v1.types.deployed_index_ref import DeployedIndexRef -from google.cloud.aiplatform_v1.types.deployed_model_ref import DeployedModelRef -from google.cloud.aiplatform_v1.types.encryption_spec import EncryptionSpec -from google.cloud.aiplatform_v1.types.endpoint import DeployedModel -from google.cloud.aiplatform_v1.types.endpoint import Endpoint -from google.cloud.aiplatform_v1.types.endpoint import PrivateEndpoints -from google.cloud.aiplatform_v1.types.endpoint_service import CreateEndpointOperationMetadata -from google.cloud.aiplatform_v1.types.endpoint_service import CreateEndpointRequest -from google.cloud.aiplatform_v1.types.endpoint_service import DeleteEndpointRequest -from google.cloud.aiplatform_v1.types.endpoint_service import DeployModelOperationMetadata -from google.cloud.aiplatform_v1.types.endpoint_service import DeployModelRequest -from google.cloud.aiplatform_v1.types.endpoint_service import DeployModelResponse -from google.cloud.aiplatform_v1.types.endpoint_service import GetEndpointRequest -from google.cloud.aiplatform_v1.types.endpoint_service import ListEndpointsRequest -from google.cloud.aiplatform_v1.types.endpoint_service import ListEndpointsResponse -from google.cloud.aiplatform_v1.types.endpoint_service import UndeployModelOperationMetadata -from google.cloud.aiplatform_v1.types.endpoint_service import 
UndeployModelRequest -from google.cloud.aiplatform_v1.types.endpoint_service import UndeployModelResponse -from google.cloud.aiplatform_v1.types.endpoint_service import UpdateEndpointRequest -from google.cloud.aiplatform_v1.types.entity_type import EntityType -from google.cloud.aiplatform_v1.types.env_var import EnvVar -from google.cloud.aiplatform_v1.types.event import Event -from google.cloud.aiplatform_v1.types.execution import Execution -from google.cloud.aiplatform_v1.types.explanation import Attribution -from google.cloud.aiplatform_v1.types.explanation import BlurBaselineConfig -from google.cloud.aiplatform_v1.types.explanation import Explanation -from google.cloud.aiplatform_v1.types.explanation import ExplanationMetadataOverride -from google.cloud.aiplatform_v1.types.explanation import ExplanationParameters -from google.cloud.aiplatform_v1.types.explanation import ExplanationSpec -from google.cloud.aiplatform_v1.types.explanation import ExplanationSpecOverride -from google.cloud.aiplatform_v1.types.explanation import FeatureNoiseSigma -from google.cloud.aiplatform_v1.types.explanation import IntegratedGradientsAttribution -from google.cloud.aiplatform_v1.types.explanation import ModelExplanation -from google.cloud.aiplatform_v1.types.explanation import SampledShapleyAttribution -from google.cloud.aiplatform_v1.types.explanation import SmoothGradConfig -from google.cloud.aiplatform_v1.types.explanation import XraiAttribution -from google.cloud.aiplatform_v1.types.explanation_metadata import ExplanationMetadata -from google.cloud.aiplatform_v1.types.feature import Feature -from google.cloud.aiplatform_v1.types.feature_monitoring_stats import FeatureStatsAnomaly -from google.cloud.aiplatform_v1.types.feature_selector import FeatureSelector -from google.cloud.aiplatform_v1.types.feature_selector import IdMatcher -from google.cloud.aiplatform_v1.types.featurestore import Featurestore -from google.cloud.aiplatform_v1.types.featurestore_online_service import 
FeatureValue -from google.cloud.aiplatform_v1.types.featurestore_online_service import FeatureValueList -from google.cloud.aiplatform_v1.types.featurestore_online_service import ReadFeatureValuesRequest -from google.cloud.aiplatform_v1.types.featurestore_online_service import ReadFeatureValuesResponse -from google.cloud.aiplatform_v1.types.featurestore_online_service import StreamingReadFeatureValuesRequest -from google.cloud.aiplatform_v1.types.featurestore_service import BatchCreateFeaturesOperationMetadata -from google.cloud.aiplatform_v1.types.featurestore_service import BatchCreateFeaturesRequest -from google.cloud.aiplatform_v1.types.featurestore_service import BatchCreateFeaturesResponse -from google.cloud.aiplatform_v1.types.featurestore_service import BatchReadFeatureValuesOperationMetadata -from google.cloud.aiplatform_v1.types.featurestore_service import BatchReadFeatureValuesRequest -from google.cloud.aiplatform_v1.types.featurestore_service import BatchReadFeatureValuesResponse -from google.cloud.aiplatform_v1.types.featurestore_service import CreateEntityTypeOperationMetadata -from google.cloud.aiplatform_v1.types.featurestore_service import CreateEntityTypeRequest -from google.cloud.aiplatform_v1.types.featurestore_service import CreateFeatureOperationMetadata -from google.cloud.aiplatform_v1.types.featurestore_service import CreateFeatureRequest -from google.cloud.aiplatform_v1.types.featurestore_service import CreateFeaturestoreOperationMetadata -from google.cloud.aiplatform_v1.types.featurestore_service import CreateFeaturestoreRequest -from google.cloud.aiplatform_v1.types.featurestore_service import DeleteEntityTypeRequest -from google.cloud.aiplatform_v1.types.featurestore_service import DeleteFeatureRequest -from google.cloud.aiplatform_v1.types.featurestore_service import DeleteFeaturestoreRequest -from google.cloud.aiplatform_v1.types.featurestore_service import DestinationFeatureSetting -from 
google.cloud.aiplatform_v1.types.featurestore_service import ExportFeatureValuesOperationMetadata -from google.cloud.aiplatform_v1.types.featurestore_service import ExportFeatureValuesRequest -from google.cloud.aiplatform_v1.types.featurestore_service import ExportFeatureValuesResponse -from google.cloud.aiplatform_v1.types.featurestore_service import FeatureValueDestination -from google.cloud.aiplatform_v1.types.featurestore_service import GetEntityTypeRequest -from google.cloud.aiplatform_v1.types.featurestore_service import GetFeatureRequest -from google.cloud.aiplatform_v1.types.featurestore_service import GetFeaturestoreRequest -from google.cloud.aiplatform_v1.types.featurestore_service import ImportFeatureValuesOperationMetadata -from google.cloud.aiplatform_v1.types.featurestore_service import ImportFeatureValuesRequest -from google.cloud.aiplatform_v1.types.featurestore_service import ImportFeatureValuesResponse -from google.cloud.aiplatform_v1.types.featurestore_service import ListEntityTypesRequest -from google.cloud.aiplatform_v1.types.featurestore_service import ListEntityTypesResponse -from google.cloud.aiplatform_v1.types.featurestore_service import ListFeaturesRequest -from google.cloud.aiplatform_v1.types.featurestore_service import ListFeaturesResponse -from google.cloud.aiplatform_v1.types.featurestore_service import ListFeaturestoresRequest -from google.cloud.aiplatform_v1.types.featurestore_service import ListFeaturestoresResponse -from google.cloud.aiplatform_v1.types.featurestore_service import SearchFeaturesRequest -from google.cloud.aiplatform_v1.types.featurestore_service import SearchFeaturesResponse -from google.cloud.aiplatform_v1.types.featurestore_service import UpdateEntityTypeRequest -from google.cloud.aiplatform_v1.types.featurestore_service import UpdateFeatureRequest -from google.cloud.aiplatform_v1.types.featurestore_service import UpdateFeaturestoreOperationMetadata -from google.cloud.aiplatform_v1.types.featurestore_service 
import UpdateFeaturestoreRequest -from google.cloud.aiplatform_v1.types.hyperparameter_tuning_job import HyperparameterTuningJob -from google.cloud.aiplatform_v1.types.index import Index -from google.cloud.aiplatform_v1.types.index_endpoint import DeployedIndex -from google.cloud.aiplatform_v1.types.index_endpoint import DeployedIndexAuthConfig -from google.cloud.aiplatform_v1.types.index_endpoint import IndexEndpoint -from google.cloud.aiplatform_v1.types.index_endpoint import IndexPrivateEndpoints -from google.cloud.aiplatform_v1.types.index_endpoint_service import CreateIndexEndpointOperationMetadata -from google.cloud.aiplatform_v1.types.index_endpoint_service import CreateIndexEndpointRequest -from google.cloud.aiplatform_v1.types.index_endpoint_service import DeleteIndexEndpointRequest -from google.cloud.aiplatform_v1.types.index_endpoint_service import DeployIndexOperationMetadata -from google.cloud.aiplatform_v1.types.index_endpoint_service import DeployIndexRequest -from google.cloud.aiplatform_v1.types.index_endpoint_service import DeployIndexResponse -from google.cloud.aiplatform_v1.types.index_endpoint_service import GetIndexEndpointRequest -from google.cloud.aiplatform_v1.types.index_endpoint_service import ListIndexEndpointsRequest -from google.cloud.aiplatform_v1.types.index_endpoint_service import ListIndexEndpointsResponse -from google.cloud.aiplatform_v1.types.index_endpoint_service import MutateDeployedIndexOperationMetadata -from google.cloud.aiplatform_v1.types.index_endpoint_service import MutateDeployedIndexRequest -from google.cloud.aiplatform_v1.types.index_endpoint_service import MutateDeployedIndexResponse -from google.cloud.aiplatform_v1.types.index_endpoint_service import UndeployIndexOperationMetadata -from google.cloud.aiplatform_v1.types.index_endpoint_service import UndeployIndexRequest -from google.cloud.aiplatform_v1.types.index_endpoint_service import UndeployIndexResponse -from 
google.cloud.aiplatform_v1.types.index_endpoint_service import UpdateIndexEndpointRequest -from google.cloud.aiplatform_v1.types.index_service import CreateIndexOperationMetadata -from google.cloud.aiplatform_v1.types.index_service import CreateIndexRequest -from google.cloud.aiplatform_v1.types.index_service import DeleteIndexRequest -from google.cloud.aiplatform_v1.types.index_service import GetIndexRequest -from google.cloud.aiplatform_v1.types.index_service import ListIndexesRequest -from google.cloud.aiplatform_v1.types.index_service import ListIndexesResponse -from google.cloud.aiplatform_v1.types.index_service import NearestNeighborSearchOperationMetadata -from google.cloud.aiplatform_v1.types.index_service import UpdateIndexOperationMetadata -from google.cloud.aiplatform_v1.types.index_service import UpdateIndexRequest -from google.cloud.aiplatform_v1.types.io import AvroSource -from google.cloud.aiplatform_v1.types.io import BigQueryDestination -from google.cloud.aiplatform_v1.types.io import BigQuerySource -from google.cloud.aiplatform_v1.types.io import ContainerRegistryDestination -from google.cloud.aiplatform_v1.types.io import CsvDestination -from google.cloud.aiplatform_v1.types.io import CsvSource -from google.cloud.aiplatform_v1.types.io import GcsDestination -from google.cloud.aiplatform_v1.types.io import GcsSource -from google.cloud.aiplatform_v1.types.io import TFRecordDestination -from google.cloud.aiplatform_v1.types.job_service import CancelBatchPredictionJobRequest -from google.cloud.aiplatform_v1.types.job_service import CancelCustomJobRequest -from google.cloud.aiplatform_v1.types.job_service import CancelDataLabelingJobRequest -from google.cloud.aiplatform_v1.types.job_service import CancelHyperparameterTuningJobRequest -from google.cloud.aiplatform_v1.types.job_service import CreateBatchPredictionJobRequest -from google.cloud.aiplatform_v1.types.job_service import CreateCustomJobRequest -from google.cloud.aiplatform_v1.types.job_service 
import CreateDataLabelingJobRequest -from google.cloud.aiplatform_v1.types.job_service import CreateHyperparameterTuningJobRequest -from google.cloud.aiplatform_v1.types.job_service import CreateModelDeploymentMonitoringJobRequest -from google.cloud.aiplatform_v1.types.job_service import DeleteBatchPredictionJobRequest -from google.cloud.aiplatform_v1.types.job_service import DeleteCustomJobRequest -from google.cloud.aiplatform_v1.types.job_service import DeleteDataLabelingJobRequest -from google.cloud.aiplatform_v1.types.job_service import DeleteHyperparameterTuningJobRequest -from google.cloud.aiplatform_v1.types.job_service import DeleteModelDeploymentMonitoringJobRequest -from google.cloud.aiplatform_v1.types.job_service import GetBatchPredictionJobRequest -from google.cloud.aiplatform_v1.types.job_service import GetCustomJobRequest -from google.cloud.aiplatform_v1.types.job_service import GetDataLabelingJobRequest -from google.cloud.aiplatform_v1.types.job_service import GetHyperparameterTuningJobRequest -from google.cloud.aiplatform_v1.types.job_service import GetModelDeploymentMonitoringJobRequest -from google.cloud.aiplatform_v1.types.job_service import ListBatchPredictionJobsRequest -from google.cloud.aiplatform_v1.types.job_service import ListBatchPredictionJobsResponse -from google.cloud.aiplatform_v1.types.job_service import ListCustomJobsRequest -from google.cloud.aiplatform_v1.types.job_service import ListCustomJobsResponse -from google.cloud.aiplatform_v1.types.job_service import ListDataLabelingJobsRequest -from google.cloud.aiplatform_v1.types.job_service import ListDataLabelingJobsResponse -from google.cloud.aiplatform_v1.types.job_service import ListHyperparameterTuningJobsRequest -from google.cloud.aiplatform_v1.types.job_service import ListHyperparameterTuningJobsResponse -from google.cloud.aiplatform_v1.types.job_service import ListModelDeploymentMonitoringJobsRequest -from google.cloud.aiplatform_v1.types.job_service import 
ListModelDeploymentMonitoringJobsResponse -from google.cloud.aiplatform_v1.types.job_service import PauseModelDeploymentMonitoringJobRequest -from google.cloud.aiplatform_v1.types.job_service import ResumeModelDeploymentMonitoringJobRequest -from google.cloud.aiplatform_v1.types.job_service import SearchModelDeploymentMonitoringStatsAnomaliesRequest -from google.cloud.aiplatform_v1.types.job_service import SearchModelDeploymentMonitoringStatsAnomaliesResponse -from google.cloud.aiplatform_v1.types.job_service import UpdateModelDeploymentMonitoringJobOperationMetadata -from google.cloud.aiplatform_v1.types.job_service import UpdateModelDeploymentMonitoringJobRequest -from google.cloud.aiplatform_v1.types.job_state import JobState -from google.cloud.aiplatform_v1.types.lineage_subgraph import LineageSubgraph -from google.cloud.aiplatform_v1.types.machine_resources import AutomaticResources -from google.cloud.aiplatform_v1.types.machine_resources import AutoscalingMetricSpec -from google.cloud.aiplatform_v1.types.machine_resources import BatchDedicatedResources -from google.cloud.aiplatform_v1.types.machine_resources import DedicatedResources -from google.cloud.aiplatform_v1.types.machine_resources import DiskSpec -from google.cloud.aiplatform_v1.types.machine_resources import MachineSpec -from google.cloud.aiplatform_v1.types.machine_resources import ResourcesConsumed -from google.cloud.aiplatform_v1.types.manual_batch_tuning_parameters import ManualBatchTuningParameters -from google.cloud.aiplatform_v1.types.metadata_schema import MetadataSchema -from google.cloud.aiplatform_v1.types.metadata_service import AddContextArtifactsAndExecutionsRequest -from google.cloud.aiplatform_v1.types.metadata_service import AddContextArtifactsAndExecutionsResponse -from google.cloud.aiplatform_v1.types.metadata_service import AddContextChildrenRequest -from google.cloud.aiplatform_v1.types.metadata_service import AddContextChildrenResponse -from 
google.cloud.aiplatform_v1.types.metadata_service import AddExecutionEventsRequest -from google.cloud.aiplatform_v1.types.metadata_service import AddExecutionEventsResponse -from google.cloud.aiplatform_v1.types.metadata_service import CreateArtifactRequest -from google.cloud.aiplatform_v1.types.metadata_service import CreateContextRequest -from google.cloud.aiplatform_v1.types.metadata_service import CreateExecutionRequest -from google.cloud.aiplatform_v1.types.metadata_service import CreateMetadataSchemaRequest -from google.cloud.aiplatform_v1.types.metadata_service import CreateMetadataStoreOperationMetadata -from google.cloud.aiplatform_v1.types.metadata_service import CreateMetadataStoreRequest -from google.cloud.aiplatform_v1.types.metadata_service import DeleteArtifactRequest -from google.cloud.aiplatform_v1.types.metadata_service import DeleteContextRequest -from google.cloud.aiplatform_v1.types.metadata_service import DeleteExecutionRequest -from google.cloud.aiplatform_v1.types.metadata_service import DeleteMetadataStoreOperationMetadata -from google.cloud.aiplatform_v1.types.metadata_service import DeleteMetadataStoreRequest -from google.cloud.aiplatform_v1.types.metadata_service import GetArtifactRequest -from google.cloud.aiplatform_v1.types.metadata_service import GetContextRequest -from google.cloud.aiplatform_v1.types.metadata_service import GetExecutionRequest -from google.cloud.aiplatform_v1.types.metadata_service import GetMetadataSchemaRequest -from google.cloud.aiplatform_v1.types.metadata_service import GetMetadataStoreRequest -from google.cloud.aiplatform_v1.types.metadata_service import ListArtifactsRequest -from google.cloud.aiplatform_v1.types.metadata_service import ListArtifactsResponse -from google.cloud.aiplatform_v1.types.metadata_service import ListContextsRequest -from google.cloud.aiplatform_v1.types.metadata_service import ListContextsResponse -from google.cloud.aiplatform_v1.types.metadata_service import ListExecutionsRequest 
-from google.cloud.aiplatform_v1.types.metadata_service import ListExecutionsResponse -from google.cloud.aiplatform_v1.types.metadata_service import ListMetadataSchemasRequest -from google.cloud.aiplatform_v1.types.metadata_service import ListMetadataSchemasResponse -from google.cloud.aiplatform_v1.types.metadata_service import ListMetadataStoresRequest -from google.cloud.aiplatform_v1.types.metadata_service import ListMetadataStoresResponse -from google.cloud.aiplatform_v1.types.metadata_service import PurgeArtifactsMetadata -from google.cloud.aiplatform_v1.types.metadata_service import PurgeArtifactsRequest -from google.cloud.aiplatform_v1.types.metadata_service import PurgeArtifactsResponse -from google.cloud.aiplatform_v1.types.metadata_service import PurgeContextsMetadata -from google.cloud.aiplatform_v1.types.metadata_service import PurgeContextsRequest -from google.cloud.aiplatform_v1.types.metadata_service import PurgeContextsResponse -from google.cloud.aiplatform_v1.types.metadata_service import PurgeExecutionsMetadata -from google.cloud.aiplatform_v1.types.metadata_service import PurgeExecutionsRequest -from google.cloud.aiplatform_v1.types.metadata_service import PurgeExecutionsResponse -from google.cloud.aiplatform_v1.types.metadata_service import QueryArtifactLineageSubgraphRequest -from google.cloud.aiplatform_v1.types.metadata_service import QueryContextLineageSubgraphRequest -from google.cloud.aiplatform_v1.types.metadata_service import QueryExecutionInputsAndOutputsRequest -from google.cloud.aiplatform_v1.types.metadata_service import UpdateArtifactRequest -from google.cloud.aiplatform_v1.types.metadata_service import UpdateContextRequest -from google.cloud.aiplatform_v1.types.metadata_service import UpdateExecutionRequest -from google.cloud.aiplatform_v1.types.metadata_store import MetadataStore -from google.cloud.aiplatform_v1.types.migratable_resource import MigratableResource -from google.cloud.aiplatform_v1.types.migration_service import 
BatchMigrateResourcesOperationMetadata -from google.cloud.aiplatform_v1.types.migration_service import BatchMigrateResourcesRequest -from google.cloud.aiplatform_v1.types.migration_service import BatchMigrateResourcesResponse -from google.cloud.aiplatform_v1.types.migration_service import MigrateResourceRequest -from google.cloud.aiplatform_v1.types.migration_service import MigrateResourceResponse -from google.cloud.aiplatform_v1.types.migration_service import SearchMigratableResourcesRequest -from google.cloud.aiplatform_v1.types.migration_service import SearchMigratableResourcesResponse -from google.cloud.aiplatform_v1.types.model import Model -from google.cloud.aiplatform_v1.types.model import ModelContainerSpec -from google.cloud.aiplatform_v1.types.model import Port -from google.cloud.aiplatform_v1.types.model import PredictSchemata -from google.cloud.aiplatform_v1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringBigQueryTable -from google.cloud.aiplatform_v1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringJob -from google.cloud.aiplatform_v1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringObjectiveConfig -from google.cloud.aiplatform_v1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringScheduleConfig -from google.cloud.aiplatform_v1.types.model_deployment_monitoring_job import ModelMonitoringStatsAnomalies -from google.cloud.aiplatform_v1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringObjectiveType -from google.cloud.aiplatform_v1.types.model_evaluation import ModelEvaluation -from google.cloud.aiplatform_v1.types.model_evaluation_slice import ModelEvaluationSlice -from google.cloud.aiplatform_v1.types.model_monitoring import ModelMonitoringAlertConfig -from google.cloud.aiplatform_v1.types.model_monitoring import ModelMonitoringObjectiveConfig -from google.cloud.aiplatform_v1.types.model_monitoring import SamplingStrategy -from 
google.cloud.aiplatform_v1.types.model_monitoring import ThresholdConfig -from google.cloud.aiplatform_v1.types.model_service import DeleteModelRequest -from google.cloud.aiplatform_v1.types.model_service import ExportModelOperationMetadata -from google.cloud.aiplatform_v1.types.model_service import ExportModelRequest -from google.cloud.aiplatform_v1.types.model_service import ExportModelResponse -from google.cloud.aiplatform_v1.types.model_service import GetModelEvaluationRequest -from google.cloud.aiplatform_v1.types.model_service import GetModelEvaluationSliceRequest -from google.cloud.aiplatform_v1.types.model_service import GetModelRequest -from google.cloud.aiplatform_v1.types.model_service import ListModelEvaluationSlicesRequest -from google.cloud.aiplatform_v1.types.model_service import ListModelEvaluationSlicesResponse -from google.cloud.aiplatform_v1.types.model_service import ListModelEvaluationsRequest -from google.cloud.aiplatform_v1.types.model_service import ListModelEvaluationsResponse -from google.cloud.aiplatform_v1.types.model_service import ListModelsRequest -from google.cloud.aiplatform_v1.types.model_service import ListModelsResponse -from google.cloud.aiplatform_v1.types.model_service import UpdateModelRequest -from google.cloud.aiplatform_v1.types.model_service import UploadModelOperationMetadata -from google.cloud.aiplatform_v1.types.model_service import UploadModelRequest -from google.cloud.aiplatform_v1.types.model_service import UploadModelResponse -from google.cloud.aiplatform_v1.types.operation import DeleteOperationMetadata -from google.cloud.aiplatform_v1.types.operation import GenericOperationMetadata -from google.cloud.aiplatform_v1.types.pipeline_job import PipelineJob -from google.cloud.aiplatform_v1.types.pipeline_job import PipelineJobDetail -from google.cloud.aiplatform_v1.types.pipeline_job import PipelineTaskDetail -from google.cloud.aiplatform_v1.types.pipeline_job import PipelineTaskExecutorDetail -from 
google.cloud.aiplatform_v1.types.pipeline_service import CancelPipelineJobRequest -from google.cloud.aiplatform_v1.types.pipeline_service import CancelTrainingPipelineRequest -from google.cloud.aiplatform_v1.types.pipeline_service import CreatePipelineJobRequest -from google.cloud.aiplatform_v1.types.pipeline_service import CreateTrainingPipelineRequest -from google.cloud.aiplatform_v1.types.pipeline_service import DeletePipelineJobRequest -from google.cloud.aiplatform_v1.types.pipeline_service import DeleteTrainingPipelineRequest -from google.cloud.aiplatform_v1.types.pipeline_service import GetPipelineJobRequest -from google.cloud.aiplatform_v1.types.pipeline_service import GetTrainingPipelineRequest -from google.cloud.aiplatform_v1.types.pipeline_service import ListPipelineJobsRequest -from google.cloud.aiplatform_v1.types.pipeline_service import ListPipelineJobsResponse -from google.cloud.aiplatform_v1.types.pipeline_service import ListTrainingPipelinesRequest -from google.cloud.aiplatform_v1.types.pipeline_service import ListTrainingPipelinesResponse -from google.cloud.aiplatform_v1.types.pipeline_state import PipelineState -from google.cloud.aiplatform_v1.types.prediction_service import ExplainRequest -from google.cloud.aiplatform_v1.types.prediction_service import ExplainResponse -from google.cloud.aiplatform_v1.types.prediction_service import PredictRequest -from google.cloud.aiplatform_v1.types.prediction_service import PredictResponse -from google.cloud.aiplatform_v1.types.prediction_service import RawPredictRequest -from google.cloud.aiplatform_v1.types.specialist_pool import SpecialistPool -from google.cloud.aiplatform_v1.types.specialist_pool_service import CreateSpecialistPoolOperationMetadata -from google.cloud.aiplatform_v1.types.specialist_pool_service import CreateSpecialistPoolRequest -from google.cloud.aiplatform_v1.types.specialist_pool_service import DeleteSpecialistPoolRequest -from google.cloud.aiplatform_v1.types.specialist_pool_service 
import GetSpecialistPoolRequest -from google.cloud.aiplatform_v1.types.specialist_pool_service import ListSpecialistPoolsRequest -from google.cloud.aiplatform_v1.types.specialist_pool_service import ListSpecialistPoolsResponse -from google.cloud.aiplatform_v1.types.specialist_pool_service import UpdateSpecialistPoolOperationMetadata -from google.cloud.aiplatform_v1.types.specialist_pool_service import UpdateSpecialistPoolRequest -from google.cloud.aiplatform_v1.types.study import Measurement -from google.cloud.aiplatform_v1.types.study import Study -from google.cloud.aiplatform_v1.types.study import StudySpec -from google.cloud.aiplatform_v1.types.study import Trial -from google.cloud.aiplatform_v1.types.tensorboard import Tensorboard -from google.cloud.aiplatform_v1.types.tensorboard_data import Scalar -from google.cloud.aiplatform_v1.types.tensorboard_data import TensorboardBlob -from google.cloud.aiplatform_v1.types.tensorboard_data import TensorboardBlobSequence -from google.cloud.aiplatform_v1.types.tensorboard_data import TensorboardTensor -from google.cloud.aiplatform_v1.types.tensorboard_data import TimeSeriesData -from google.cloud.aiplatform_v1.types.tensorboard_data import TimeSeriesDataPoint -from google.cloud.aiplatform_v1.types.tensorboard_experiment import TensorboardExperiment -from google.cloud.aiplatform_v1.types.tensorboard_run import TensorboardRun -from google.cloud.aiplatform_v1.types.tensorboard_service import BatchCreateTensorboardRunsRequest -from google.cloud.aiplatform_v1.types.tensorboard_service import BatchCreateTensorboardRunsResponse -from google.cloud.aiplatform_v1.types.tensorboard_service import BatchCreateTensorboardTimeSeriesRequest -from google.cloud.aiplatform_v1.types.tensorboard_service import BatchCreateTensorboardTimeSeriesResponse -from google.cloud.aiplatform_v1.types.tensorboard_service import BatchReadTensorboardTimeSeriesDataRequest -from google.cloud.aiplatform_v1.types.tensorboard_service import 
BatchReadTensorboardTimeSeriesDataResponse -from google.cloud.aiplatform_v1.types.tensorboard_service import CreateTensorboardExperimentRequest -from google.cloud.aiplatform_v1.types.tensorboard_service import CreateTensorboardOperationMetadata -from google.cloud.aiplatform_v1.types.tensorboard_service import CreateTensorboardRequest -from google.cloud.aiplatform_v1.types.tensorboard_service import CreateTensorboardRunRequest -from google.cloud.aiplatform_v1.types.tensorboard_service import CreateTensorboardTimeSeriesRequest -from google.cloud.aiplatform_v1.types.tensorboard_service import DeleteTensorboardExperimentRequest -from google.cloud.aiplatform_v1.types.tensorboard_service import DeleteTensorboardRequest -from google.cloud.aiplatform_v1.types.tensorboard_service import DeleteTensorboardRunRequest -from google.cloud.aiplatform_v1.types.tensorboard_service import DeleteTensorboardTimeSeriesRequest -from google.cloud.aiplatform_v1.types.tensorboard_service import ExportTensorboardTimeSeriesDataRequest -from google.cloud.aiplatform_v1.types.tensorboard_service import ExportTensorboardTimeSeriesDataResponse -from google.cloud.aiplatform_v1.types.tensorboard_service import GetTensorboardExperimentRequest -from google.cloud.aiplatform_v1.types.tensorboard_service import GetTensorboardRequest -from google.cloud.aiplatform_v1.types.tensorboard_service import GetTensorboardRunRequest -from google.cloud.aiplatform_v1.types.tensorboard_service import GetTensorboardTimeSeriesRequest -from google.cloud.aiplatform_v1.types.tensorboard_service import ListTensorboardExperimentsRequest -from google.cloud.aiplatform_v1.types.tensorboard_service import ListTensorboardExperimentsResponse -from google.cloud.aiplatform_v1.types.tensorboard_service import ListTensorboardRunsRequest -from google.cloud.aiplatform_v1.types.tensorboard_service import ListTensorboardRunsResponse -from google.cloud.aiplatform_v1.types.tensorboard_service import ListTensorboardsRequest -from 
google.cloud.aiplatform_v1.types.tensorboard_service import ListTensorboardsResponse -from google.cloud.aiplatform_v1.types.tensorboard_service import ListTensorboardTimeSeriesRequest -from google.cloud.aiplatform_v1.types.tensorboard_service import ListTensorboardTimeSeriesResponse -from google.cloud.aiplatform_v1.types.tensorboard_service import ReadTensorboardBlobDataRequest -from google.cloud.aiplatform_v1.types.tensorboard_service import ReadTensorboardBlobDataResponse -from google.cloud.aiplatform_v1.types.tensorboard_service import ReadTensorboardTimeSeriesDataRequest -from google.cloud.aiplatform_v1.types.tensorboard_service import ReadTensorboardTimeSeriesDataResponse -from google.cloud.aiplatform_v1.types.tensorboard_service import UpdateTensorboardExperimentRequest -from google.cloud.aiplatform_v1.types.tensorboard_service import UpdateTensorboardOperationMetadata -from google.cloud.aiplatform_v1.types.tensorboard_service import UpdateTensorboardRequest -from google.cloud.aiplatform_v1.types.tensorboard_service import UpdateTensorboardRunRequest -from google.cloud.aiplatform_v1.types.tensorboard_service import UpdateTensorboardTimeSeriesRequest -from google.cloud.aiplatform_v1.types.tensorboard_service import WriteTensorboardExperimentDataRequest -from google.cloud.aiplatform_v1.types.tensorboard_service import WriteTensorboardExperimentDataResponse -from google.cloud.aiplatform_v1.types.tensorboard_service import WriteTensorboardRunDataRequest -from google.cloud.aiplatform_v1.types.tensorboard_service import WriteTensorboardRunDataResponse -from google.cloud.aiplatform_v1.types.tensorboard_time_series import TensorboardTimeSeries -from google.cloud.aiplatform_v1.types.training_pipeline import FilterSplit -from google.cloud.aiplatform_v1.types.training_pipeline import FractionSplit -from google.cloud.aiplatform_v1.types.training_pipeline import InputDataConfig -from google.cloud.aiplatform_v1.types.training_pipeline import PredefinedSplit -from 
google.cloud.aiplatform_v1.types.training_pipeline import StratifiedSplit -from google.cloud.aiplatform_v1.types.training_pipeline import TimestampSplit -from google.cloud.aiplatform_v1.types.training_pipeline import TrainingPipeline -from google.cloud.aiplatform_v1.types.types import BoolArray -from google.cloud.aiplatform_v1.types.types import DoubleArray -from google.cloud.aiplatform_v1.types.types import Int64Array -from google.cloud.aiplatform_v1.types.types import StringArray -from google.cloud.aiplatform_v1.types.unmanaged_container_model import UnmanagedContainerModel -from google.cloud.aiplatform_v1.types.user_action_reference import UserActionReference -from google.cloud.aiplatform_v1.types.value import Value -from google.cloud.aiplatform_v1.types.vizier_service import AddTrialMeasurementRequest -from google.cloud.aiplatform_v1.types.vizier_service import CheckTrialEarlyStoppingStateMetatdata -from google.cloud.aiplatform_v1.types.vizier_service import CheckTrialEarlyStoppingStateRequest -from google.cloud.aiplatform_v1.types.vizier_service import CheckTrialEarlyStoppingStateResponse -from google.cloud.aiplatform_v1.types.vizier_service import CompleteTrialRequest -from google.cloud.aiplatform_v1.types.vizier_service import CreateStudyRequest -from google.cloud.aiplatform_v1.types.vizier_service import CreateTrialRequest -from google.cloud.aiplatform_v1.types.vizier_service import DeleteStudyRequest -from google.cloud.aiplatform_v1.types.vizier_service import DeleteTrialRequest -from google.cloud.aiplatform_v1.types.vizier_service import GetStudyRequest -from google.cloud.aiplatform_v1.types.vizier_service import GetTrialRequest -from google.cloud.aiplatform_v1.types.vizier_service import ListOptimalTrialsRequest -from google.cloud.aiplatform_v1.types.vizier_service import ListOptimalTrialsResponse -from google.cloud.aiplatform_v1.types.vizier_service import ListStudiesRequest -from google.cloud.aiplatform_v1.types.vizier_service import 
ListStudiesResponse -from google.cloud.aiplatform_v1.types.vizier_service import ListTrialsRequest -from google.cloud.aiplatform_v1.types.vizier_service import ListTrialsResponse -from google.cloud.aiplatform_v1.types.vizier_service import LookupStudyRequest -from google.cloud.aiplatform_v1.types.vizier_service import StopTrialRequest -from google.cloud.aiplatform_v1.types.vizier_service import SuggestTrialsMetadata -from google.cloud.aiplatform_v1.types.vizier_service import SuggestTrialsRequest -from google.cloud.aiplatform_v1.types.vizier_service import SuggestTrialsResponse - -__all__ = ('DatasetServiceClient', - 'DatasetServiceAsyncClient', - 'EndpointServiceClient', - 'EndpointServiceAsyncClient', - 'FeaturestoreOnlineServingServiceClient', - 'FeaturestoreOnlineServingServiceAsyncClient', - 'FeaturestoreServiceClient', - 'FeaturestoreServiceAsyncClient', - 'IndexEndpointServiceClient', - 'IndexEndpointServiceAsyncClient', - 'IndexServiceClient', - 'IndexServiceAsyncClient', - 'JobServiceClient', - 'JobServiceAsyncClient', - 'MetadataServiceClient', - 'MetadataServiceAsyncClient', - 'MigrationServiceClient', - 'MigrationServiceAsyncClient', - 'ModelServiceClient', - 'ModelServiceAsyncClient', - 'PipelineServiceClient', - 'PipelineServiceAsyncClient', - 'PredictionServiceClient', - 'PredictionServiceAsyncClient', - 'SpecialistPoolServiceClient', - 'SpecialistPoolServiceAsyncClient', - 'TensorboardServiceClient', - 'TensorboardServiceAsyncClient', - 'VizierServiceClient', - 'VizierServiceAsyncClient', - 'AcceleratorType', - 'Annotation', - 'AnnotationSpec', - 'Artifact', - 'BatchPredictionJob', - 'CompletionStats', - 'Context', - 'ContainerSpec', - 'CustomJob', - 'CustomJobSpec', - 'PythonPackageSpec', - 'Scheduling', - 'WorkerPoolSpec', - 'DataItem', - 'ActiveLearningConfig', - 'DataLabelingJob', - 'SampleConfig', - 'TrainingConfig', - 'Dataset', - 'ExportDataConfig', - 'ImportDataConfig', - 'CreateDatasetOperationMetadata', - 'CreateDatasetRequest', - 
'DeleteDatasetRequest', - 'ExportDataOperationMetadata', - 'ExportDataRequest', - 'ExportDataResponse', - 'GetAnnotationSpecRequest', - 'GetDatasetRequest', - 'ImportDataOperationMetadata', - 'ImportDataRequest', - 'ImportDataResponse', - 'ListAnnotationsRequest', - 'ListAnnotationsResponse', - 'ListDataItemsRequest', - 'ListDataItemsResponse', - 'ListDatasetsRequest', - 'ListDatasetsResponse', - 'UpdateDatasetRequest', - 'DeployedIndexRef', - 'DeployedModelRef', - 'EncryptionSpec', - 'DeployedModel', - 'Endpoint', - 'PrivateEndpoints', - 'CreateEndpointOperationMetadata', - 'CreateEndpointRequest', - 'DeleteEndpointRequest', - 'DeployModelOperationMetadata', - 'DeployModelRequest', - 'DeployModelResponse', - 'GetEndpointRequest', - 'ListEndpointsRequest', - 'ListEndpointsResponse', - 'UndeployModelOperationMetadata', - 'UndeployModelRequest', - 'UndeployModelResponse', - 'UpdateEndpointRequest', - 'EntityType', - 'EnvVar', - 'Event', - 'Execution', - 'Attribution', - 'BlurBaselineConfig', - 'Explanation', - 'ExplanationMetadataOverride', - 'ExplanationParameters', - 'ExplanationSpec', - 'ExplanationSpecOverride', - 'FeatureNoiseSigma', - 'IntegratedGradientsAttribution', - 'ModelExplanation', - 'SampledShapleyAttribution', - 'SmoothGradConfig', - 'XraiAttribution', - 'ExplanationMetadata', - 'Feature', - 'FeatureStatsAnomaly', - 'FeatureSelector', - 'IdMatcher', - 'Featurestore', - 'FeatureValue', - 'FeatureValueList', - 'ReadFeatureValuesRequest', - 'ReadFeatureValuesResponse', - 'StreamingReadFeatureValuesRequest', - 'BatchCreateFeaturesOperationMetadata', - 'BatchCreateFeaturesRequest', - 'BatchCreateFeaturesResponse', - 'BatchReadFeatureValuesOperationMetadata', - 'BatchReadFeatureValuesRequest', - 'BatchReadFeatureValuesResponse', - 'CreateEntityTypeOperationMetadata', - 'CreateEntityTypeRequest', - 'CreateFeatureOperationMetadata', - 'CreateFeatureRequest', - 'CreateFeaturestoreOperationMetadata', - 'CreateFeaturestoreRequest', - 'DeleteEntityTypeRequest', - 
'DeleteFeatureRequest', - 'DeleteFeaturestoreRequest', - 'DestinationFeatureSetting', - 'ExportFeatureValuesOperationMetadata', - 'ExportFeatureValuesRequest', - 'ExportFeatureValuesResponse', - 'FeatureValueDestination', - 'GetEntityTypeRequest', - 'GetFeatureRequest', - 'GetFeaturestoreRequest', - 'ImportFeatureValuesOperationMetadata', - 'ImportFeatureValuesRequest', - 'ImportFeatureValuesResponse', - 'ListEntityTypesRequest', - 'ListEntityTypesResponse', - 'ListFeaturesRequest', - 'ListFeaturesResponse', - 'ListFeaturestoresRequest', - 'ListFeaturestoresResponse', - 'SearchFeaturesRequest', - 'SearchFeaturesResponse', - 'UpdateEntityTypeRequest', - 'UpdateFeatureRequest', - 'UpdateFeaturestoreOperationMetadata', - 'UpdateFeaturestoreRequest', - 'HyperparameterTuningJob', - 'Index', - 'DeployedIndex', - 'DeployedIndexAuthConfig', - 'IndexEndpoint', - 'IndexPrivateEndpoints', - 'CreateIndexEndpointOperationMetadata', - 'CreateIndexEndpointRequest', - 'DeleteIndexEndpointRequest', - 'DeployIndexOperationMetadata', - 'DeployIndexRequest', - 'DeployIndexResponse', - 'GetIndexEndpointRequest', - 'ListIndexEndpointsRequest', - 'ListIndexEndpointsResponse', - 'MutateDeployedIndexOperationMetadata', - 'MutateDeployedIndexRequest', - 'MutateDeployedIndexResponse', - 'UndeployIndexOperationMetadata', - 'UndeployIndexRequest', - 'UndeployIndexResponse', - 'UpdateIndexEndpointRequest', - 'CreateIndexOperationMetadata', - 'CreateIndexRequest', - 'DeleteIndexRequest', - 'GetIndexRequest', - 'ListIndexesRequest', - 'ListIndexesResponse', - 'NearestNeighborSearchOperationMetadata', - 'UpdateIndexOperationMetadata', - 'UpdateIndexRequest', - 'AvroSource', - 'BigQueryDestination', - 'BigQuerySource', - 'ContainerRegistryDestination', - 'CsvDestination', - 'CsvSource', - 'GcsDestination', - 'GcsSource', - 'TFRecordDestination', - 'CancelBatchPredictionJobRequest', - 'CancelCustomJobRequest', - 'CancelDataLabelingJobRequest', - 'CancelHyperparameterTuningJobRequest', - 
'CreateBatchPredictionJobRequest', - 'CreateCustomJobRequest', - 'CreateDataLabelingJobRequest', - 'CreateHyperparameterTuningJobRequest', - 'CreateModelDeploymentMonitoringJobRequest', - 'DeleteBatchPredictionJobRequest', - 'DeleteCustomJobRequest', - 'DeleteDataLabelingJobRequest', - 'DeleteHyperparameterTuningJobRequest', - 'DeleteModelDeploymentMonitoringJobRequest', - 'GetBatchPredictionJobRequest', - 'GetCustomJobRequest', - 'GetDataLabelingJobRequest', - 'GetHyperparameterTuningJobRequest', - 'GetModelDeploymentMonitoringJobRequest', - 'ListBatchPredictionJobsRequest', - 'ListBatchPredictionJobsResponse', - 'ListCustomJobsRequest', - 'ListCustomJobsResponse', - 'ListDataLabelingJobsRequest', - 'ListDataLabelingJobsResponse', - 'ListHyperparameterTuningJobsRequest', - 'ListHyperparameterTuningJobsResponse', - 'ListModelDeploymentMonitoringJobsRequest', - 'ListModelDeploymentMonitoringJobsResponse', - 'PauseModelDeploymentMonitoringJobRequest', - 'ResumeModelDeploymentMonitoringJobRequest', - 'SearchModelDeploymentMonitoringStatsAnomaliesRequest', - 'SearchModelDeploymentMonitoringStatsAnomaliesResponse', - 'UpdateModelDeploymentMonitoringJobOperationMetadata', - 'UpdateModelDeploymentMonitoringJobRequest', - 'JobState', - 'LineageSubgraph', - 'AutomaticResources', - 'AutoscalingMetricSpec', - 'BatchDedicatedResources', - 'DedicatedResources', - 'DiskSpec', - 'MachineSpec', - 'ResourcesConsumed', - 'ManualBatchTuningParameters', - 'MetadataSchema', - 'AddContextArtifactsAndExecutionsRequest', - 'AddContextArtifactsAndExecutionsResponse', - 'AddContextChildrenRequest', - 'AddContextChildrenResponse', - 'AddExecutionEventsRequest', - 'AddExecutionEventsResponse', - 'CreateArtifactRequest', - 'CreateContextRequest', - 'CreateExecutionRequest', - 'CreateMetadataSchemaRequest', - 'CreateMetadataStoreOperationMetadata', - 'CreateMetadataStoreRequest', - 'DeleteArtifactRequest', - 'DeleteContextRequest', - 'DeleteExecutionRequest', - 
'DeleteMetadataStoreOperationMetadata', - 'DeleteMetadataStoreRequest', - 'GetArtifactRequest', - 'GetContextRequest', - 'GetExecutionRequest', - 'GetMetadataSchemaRequest', - 'GetMetadataStoreRequest', - 'ListArtifactsRequest', - 'ListArtifactsResponse', - 'ListContextsRequest', - 'ListContextsResponse', - 'ListExecutionsRequest', - 'ListExecutionsResponse', - 'ListMetadataSchemasRequest', - 'ListMetadataSchemasResponse', - 'ListMetadataStoresRequest', - 'ListMetadataStoresResponse', - 'PurgeArtifactsMetadata', - 'PurgeArtifactsRequest', - 'PurgeArtifactsResponse', - 'PurgeContextsMetadata', - 'PurgeContextsRequest', - 'PurgeContextsResponse', - 'PurgeExecutionsMetadata', - 'PurgeExecutionsRequest', - 'PurgeExecutionsResponse', - 'QueryArtifactLineageSubgraphRequest', - 'QueryContextLineageSubgraphRequest', - 'QueryExecutionInputsAndOutputsRequest', - 'UpdateArtifactRequest', - 'UpdateContextRequest', - 'UpdateExecutionRequest', - 'MetadataStore', - 'MigratableResource', - 'BatchMigrateResourcesOperationMetadata', - 'BatchMigrateResourcesRequest', - 'BatchMigrateResourcesResponse', - 'MigrateResourceRequest', - 'MigrateResourceResponse', - 'SearchMigratableResourcesRequest', - 'SearchMigratableResourcesResponse', - 'Model', - 'ModelContainerSpec', - 'Port', - 'PredictSchemata', - 'ModelDeploymentMonitoringBigQueryTable', - 'ModelDeploymentMonitoringJob', - 'ModelDeploymentMonitoringObjectiveConfig', - 'ModelDeploymentMonitoringScheduleConfig', - 'ModelMonitoringStatsAnomalies', - 'ModelDeploymentMonitoringObjectiveType', - 'ModelEvaluation', - 'ModelEvaluationSlice', - 'ModelMonitoringAlertConfig', - 'ModelMonitoringObjectiveConfig', - 'SamplingStrategy', - 'ThresholdConfig', - 'DeleteModelRequest', - 'ExportModelOperationMetadata', - 'ExportModelRequest', - 'ExportModelResponse', - 'GetModelEvaluationRequest', - 'GetModelEvaluationSliceRequest', - 'GetModelRequest', - 'ListModelEvaluationSlicesRequest', - 'ListModelEvaluationSlicesResponse', - 
'ListModelEvaluationsRequest', - 'ListModelEvaluationsResponse', - 'ListModelsRequest', - 'ListModelsResponse', - 'UpdateModelRequest', - 'UploadModelOperationMetadata', - 'UploadModelRequest', - 'UploadModelResponse', - 'DeleteOperationMetadata', - 'GenericOperationMetadata', - 'PipelineJob', - 'PipelineJobDetail', - 'PipelineTaskDetail', - 'PipelineTaskExecutorDetail', - 'CancelPipelineJobRequest', - 'CancelTrainingPipelineRequest', - 'CreatePipelineJobRequest', - 'CreateTrainingPipelineRequest', - 'DeletePipelineJobRequest', - 'DeleteTrainingPipelineRequest', - 'GetPipelineJobRequest', - 'GetTrainingPipelineRequest', - 'ListPipelineJobsRequest', - 'ListPipelineJobsResponse', - 'ListTrainingPipelinesRequest', - 'ListTrainingPipelinesResponse', - 'PipelineState', - 'ExplainRequest', - 'ExplainResponse', - 'PredictRequest', - 'PredictResponse', - 'RawPredictRequest', - 'SpecialistPool', - 'CreateSpecialistPoolOperationMetadata', - 'CreateSpecialistPoolRequest', - 'DeleteSpecialistPoolRequest', - 'GetSpecialistPoolRequest', - 'ListSpecialistPoolsRequest', - 'ListSpecialistPoolsResponse', - 'UpdateSpecialistPoolOperationMetadata', - 'UpdateSpecialistPoolRequest', - 'Measurement', - 'Study', - 'StudySpec', - 'Trial', - 'Tensorboard', - 'Scalar', - 'TensorboardBlob', - 'TensorboardBlobSequence', - 'TensorboardTensor', - 'TimeSeriesData', - 'TimeSeriesDataPoint', - 'TensorboardExperiment', - 'TensorboardRun', - 'BatchCreateTensorboardRunsRequest', - 'BatchCreateTensorboardRunsResponse', - 'BatchCreateTensorboardTimeSeriesRequest', - 'BatchCreateTensorboardTimeSeriesResponse', - 'BatchReadTensorboardTimeSeriesDataRequest', - 'BatchReadTensorboardTimeSeriesDataResponse', - 'CreateTensorboardExperimentRequest', - 'CreateTensorboardOperationMetadata', - 'CreateTensorboardRequest', - 'CreateTensorboardRunRequest', - 'CreateTensorboardTimeSeriesRequest', - 'DeleteTensorboardExperimentRequest', - 'DeleteTensorboardRequest', - 'DeleteTensorboardRunRequest', - 
'DeleteTensorboardTimeSeriesRequest', - 'ExportTensorboardTimeSeriesDataRequest', - 'ExportTensorboardTimeSeriesDataResponse', - 'GetTensorboardExperimentRequest', - 'GetTensorboardRequest', - 'GetTensorboardRunRequest', - 'GetTensorboardTimeSeriesRequest', - 'ListTensorboardExperimentsRequest', - 'ListTensorboardExperimentsResponse', - 'ListTensorboardRunsRequest', - 'ListTensorboardRunsResponse', - 'ListTensorboardsRequest', - 'ListTensorboardsResponse', - 'ListTensorboardTimeSeriesRequest', - 'ListTensorboardTimeSeriesResponse', - 'ReadTensorboardBlobDataRequest', - 'ReadTensorboardBlobDataResponse', - 'ReadTensorboardTimeSeriesDataRequest', - 'ReadTensorboardTimeSeriesDataResponse', - 'UpdateTensorboardExperimentRequest', - 'UpdateTensorboardOperationMetadata', - 'UpdateTensorboardRequest', - 'UpdateTensorboardRunRequest', - 'UpdateTensorboardTimeSeriesRequest', - 'WriteTensorboardExperimentDataRequest', - 'WriteTensorboardExperimentDataResponse', - 'WriteTensorboardRunDataRequest', - 'WriteTensorboardRunDataResponse', - 'TensorboardTimeSeries', - 'FilterSplit', - 'FractionSplit', - 'InputDataConfig', - 'PredefinedSplit', - 'StratifiedSplit', - 'TimestampSplit', - 'TrainingPipeline', - 'BoolArray', - 'DoubleArray', - 'Int64Array', - 'StringArray', - 'UnmanagedContainerModel', - 'UserActionReference', - 'Value', - 'AddTrialMeasurementRequest', - 'CheckTrialEarlyStoppingStateMetatdata', - 'CheckTrialEarlyStoppingStateRequest', - 'CheckTrialEarlyStoppingStateResponse', - 'CompleteTrialRequest', - 'CreateStudyRequest', - 'CreateTrialRequest', - 'DeleteStudyRequest', - 'DeleteTrialRequest', - 'GetStudyRequest', - 'GetTrialRequest', - 'ListOptimalTrialsRequest', - 'ListOptimalTrialsResponse', - 'ListStudiesRequest', - 'ListStudiesResponse', - 'ListTrialsRequest', - 'ListTrialsResponse', - 'LookupStudyRequest', - 'StopTrialRequest', - 'SuggestTrialsMetadata', - 'SuggestTrialsRequest', - 'SuggestTrialsResponse', -) diff --git 
a/owl-bot-staging/v1/google/cloud/aiplatform/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/py.typed deleted file mode 100644 index 228f1c51c6..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py deleted file mode 100644 index 41d6704c1f..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - - -from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.image_classification import ImageClassificationPredictionInstance -from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.image_object_detection import ImageObjectDetectionPredictionInstance -from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.image_segmentation import ImageSegmentationPredictionInstance -from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.text_classification import TextClassificationPredictionInstance -from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.text_extraction import TextExtractionPredictionInstance -from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.text_sentiment import TextSentimentPredictionInstance -from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.video_action_recognition import VideoActionRecognitionPredictionInstance -from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.video_classification import VideoClassificationPredictionInstance -from google.cloud.aiplatform.v1.schema.predict.instance_v1.types.video_object_tracking import VideoObjectTrackingPredictionInstance - -__all__ = ('ImageClassificationPredictionInstance', - 'ImageObjectDetectionPredictionInstance', - 'ImageSegmentationPredictionInstance', - 'TextClassificationPredictionInstance', - 'TextExtractionPredictionInstance', - 'TextSentimentPredictionInstance', - 'VideoActionRecognitionPredictionInstance', - 'VideoClassificationPredictionInstance', - 'VideoObjectTrackingPredictionInstance', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/py.typed deleted file mode 100644 index f70e7f605a..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. 
-# The google-cloud-aiplatform-v1-schema-predict-instance package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py deleted file mode 100644 index 41ab5407a7..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - - -from .types.image_classification import ImageClassificationPredictionInstance -from .types.image_object_detection import ImageObjectDetectionPredictionInstance -from .types.image_segmentation import ImageSegmentationPredictionInstance -from .types.text_classification import TextClassificationPredictionInstance -from .types.text_extraction import TextExtractionPredictionInstance -from .types.text_sentiment import TextSentimentPredictionInstance -from .types.video_action_recognition import VideoActionRecognitionPredictionInstance -from .types.video_classification import VideoClassificationPredictionInstance -from .types.video_object_tracking import VideoObjectTrackingPredictionInstance - -__all__ = ( -'ImageClassificationPredictionInstance', -'ImageObjectDetectionPredictionInstance', -'ImageSegmentationPredictionInstance', -'TextClassificationPredictionInstance', -'TextExtractionPredictionInstance', -'TextSentimentPredictionInstance', -'VideoActionRecognitionPredictionInstance', -'VideoClassificationPredictionInstance', -'VideoObjectTrackingPredictionInstance', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_metadata.json b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_metadata.json deleted file mode 100644 index 0ae909d6ea..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_metadata.json +++ /dev/null @@ -1,7 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.aiplatform.v1.schema.predict.instance_v1", - "protoPackage": "google.cloud.aiplatform.v1.schema.predict.instance", - "schema": "1.0" -} diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed deleted file mode 100644 index f70e7f605a..0000000000 
--- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1-schema-predict-instance package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/services/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/services/__init__.py deleted file mode 100644 index 4de65971c2..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py deleted file mode 100644 index 80a5332604..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/__init__.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .image_classification import ( - ImageClassificationPredictionInstance, -) -from .image_object_detection import ( - ImageObjectDetectionPredictionInstance, -) -from .image_segmentation import ( - ImageSegmentationPredictionInstance, -) -from .text_classification import ( - TextClassificationPredictionInstance, -) -from .text_extraction import ( - TextExtractionPredictionInstance, -) -from .text_sentiment import ( - TextSentimentPredictionInstance, -) -from .video_action_recognition import ( - VideoActionRecognitionPredictionInstance, -) -from .video_classification import ( - VideoClassificationPredictionInstance, -) -from .video_object_tracking import ( - VideoObjectTrackingPredictionInstance, -) - -__all__ = ( - 'ImageClassificationPredictionInstance', - 'ImageObjectDetectionPredictionInstance', - 'ImageSegmentationPredictionInstance', - 'TextClassificationPredictionInstance', - 'TextExtractionPredictionInstance', - 'TextSentimentPredictionInstance', - 'VideoActionRecognitionPredictionInstance', - 'VideoClassificationPredictionInstance', - 'VideoObjectTrackingPredictionInstance', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py deleted file mode 100644 index da17323f05..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_classification.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# 
-# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.instance', - manifest={ - 'ImageClassificationPredictionInstance', - }, -) - - -class ImageClassificationPredictionInstance(proto.Message): - r"""Prediction input format for Image Classification. - - Attributes: - content (str): - The image bytes or Cloud Storage URI to make - the prediction on. - mime_type (str): - The MIME type of the content of the image. - Only the images in below listed MIME types are - supported. - image/jpeg - - image/gif - - image/png - - image/webp - - image/bmp - - image/tiff - - image/vnd.microsoft.icon - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py deleted file mode 100644 index 9e2f3ab651..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_object_detection.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.instance', - manifest={ - 'ImageObjectDetectionPredictionInstance', - }, -) - - -class ImageObjectDetectionPredictionInstance(proto.Message): - r"""Prediction input format for Image Object Detection. - - Attributes: - content (str): - The image bytes or Cloud Storage URI to make - the prediction on. - mime_type (str): - The MIME type of the content of the image. - Only the images in below listed MIME types are - supported. - image/jpeg - - image/gif - - image/png - - image/webp - - image/bmp - - image/tiff - - image/vnd.microsoft.icon - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py deleted file mode 100644 index 18a25a7f29..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/image_segmentation.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.instance', - manifest={ - 'ImageSegmentationPredictionInstance', - }, -) - - -class ImageSegmentationPredictionInstance(proto.Message): - r"""Prediction input format for Image Segmentation. - - Attributes: - content (str): - The image bytes to make the predictions on. - mime_type (str): - The MIME type of the content of the image. - Only the images in below listed MIME types are - supported. - image/jpeg - - image/png - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py deleted file mode 100644 index 7c0dbad65a..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_classification.py +++ /dev/null @@ -1,49 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.instance', - manifest={ - 'TextClassificationPredictionInstance', - }, -) - - -class TextClassificationPredictionInstance(proto.Message): - r"""Prediction input format for Text Classification. - - Attributes: - content (str): - The text snippet to make the predictions on. - mime_type (str): - The MIME type of the text snippet. The - supported MIME types are listed below. - - text/plain - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py deleted file mode 100644 index 0ecaa3f41c..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_extraction.py +++ /dev/null @@ -1,62 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.instance', - manifest={ - 'TextExtractionPredictionInstance', - }, -) - - -class TextExtractionPredictionInstance(proto.Message): - r"""Prediction input format for Text Extraction. - - Attributes: - content (str): - The text snippet to make the predictions on. - mime_type (str): - The MIME type of the text snippet. The - supported MIME types are listed below. - - text/plain - key (str): - This field is only used for batch prediction. - If a key is provided, the batch prediction - result will by mapped to this key. If omitted, - then the batch prediction result will contain - the entire input instance. Vertex AI will not - check if keys in the request are duplicates, so - it is up to the caller to ensure the keys are - unique. 
- """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - key = proto.Field( - proto.STRING, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py deleted file mode 100644 index 54e90aeac7..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/text_sentiment.py +++ /dev/null @@ -1,49 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.instance', - manifest={ - 'TextSentimentPredictionInstance', - }, -) - - -class TextSentimentPredictionInstance(proto.Message): - r"""Prediction input format for Text Sentiment. - - Attributes: - content (str): - The text snippet to make the predictions on. - mime_type (str): - The MIME type of the text snippet. The - supported MIME types are listed below. 
- - text/plain - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py deleted file mode 100644 index 42cf9fc8b4..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_action_recognition.py +++ /dev/null @@ -1,73 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.instance', - manifest={ - 'VideoActionRecognitionPredictionInstance', - }, -) - - -class VideoActionRecognitionPredictionInstance(proto.Message): - r"""Prediction input format for Video Action Recognition. - - Attributes: - content (str): - The Google Cloud Storage location of the - video on which to perform the prediction. - mime_type (str): - The MIME type of the content of the video. - Only the following are supported: video/mp4 - video/avi video/quicktime - time_segment_start (str): - The beginning, inclusive, of the video's time - segment on which to perform the prediction. 
- Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision. - time_segment_end (str): - The end, exclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision, and "inf" or "Infinity" - is allowed, which means the end of the video. - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.STRING, - number=3, - ) - time_segment_end = proto.Field( - proto.STRING, - number=4, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py deleted file mode 100644 index 2f95833b6c..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_classification.py +++ /dev/null @@ -1,73 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.instance', - manifest={ - 'VideoClassificationPredictionInstance', - }, -) - - -class VideoClassificationPredictionInstance(proto.Message): - r"""Prediction input format for Video Classification. - - Attributes: - content (str): - The Google Cloud Storage location of the - video on which to perform the prediction. - mime_type (str): - The MIME type of the content of the video. - Only the following are supported: video/mp4 - video/avi video/quicktime - time_segment_start (str): - The beginning, inclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision. - time_segment_end (str): - The end, exclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision, and "inf" or "Infinity" - is allowed, which means the end of the video. 
- """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.STRING, - number=3, - ) - time_segment_end = proto.Field( - proto.STRING, - number=4, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py deleted file mode 100644 index e9152e1e38..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/instance_v1/types/video_object_tracking.py +++ /dev/null @@ -1,73 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.instance', - manifest={ - 'VideoObjectTrackingPredictionInstance', - }, -) - - -class VideoObjectTrackingPredictionInstance(proto.Message): - r"""Prediction input format for Video Object Tracking. - - Attributes: - content (str): - The Google Cloud Storage location of the - video on which to perform the prediction. - mime_type (str): - The MIME type of the content of the video. 
- Only the following are supported: video/mp4 - video/avi video/quicktime - time_segment_start (str): - The beginning, inclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision. - time_segment_end (str): - The end, exclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision, and "inf" or "Infinity" - is allowed, which means the end of the video. - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.STRING, - number=3, - ) - time_segment_end = proto.Field( - proto.STRING, - number=4, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/__init__.py deleted file mode 100644 index 91ae7f0d5c..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - - -from google.cloud.aiplatform.v1.schema.predict.params_v1.types.image_classification import ImageClassificationPredictionParams -from google.cloud.aiplatform.v1.schema.predict.params_v1.types.image_object_detection import ImageObjectDetectionPredictionParams -from google.cloud.aiplatform.v1.schema.predict.params_v1.types.image_segmentation import ImageSegmentationPredictionParams -from google.cloud.aiplatform.v1.schema.predict.params_v1.types.video_action_recognition import VideoActionRecognitionPredictionParams -from google.cloud.aiplatform.v1.schema.predict.params_v1.types.video_classification import VideoClassificationPredictionParams -from google.cloud.aiplatform.v1.schema.predict.params_v1.types.video_object_tracking import VideoObjectTrackingPredictionParams - -__all__ = ('ImageClassificationPredictionParams', - 'ImageObjectDetectionPredictionParams', - 'ImageSegmentationPredictionParams', - 'VideoActionRecognitionPredictionParams', - 'VideoClassificationPredictionParams', - 'VideoObjectTrackingPredictionParams', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/py.typed deleted file mode 100644 index df96e61590..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1-schema-predict-params package uses inline types. 
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py deleted file mode 100644 index 91b718b437..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -from .types.image_classification import ImageClassificationPredictionParams -from .types.image_object_detection import ImageObjectDetectionPredictionParams -from .types.image_segmentation import ImageSegmentationPredictionParams -from .types.video_action_recognition import VideoActionRecognitionPredictionParams -from .types.video_classification import VideoClassificationPredictionParams -from .types.video_object_tracking import VideoObjectTrackingPredictionParams - -__all__ = ( -'ImageClassificationPredictionParams', -'ImageObjectDetectionPredictionParams', -'ImageSegmentationPredictionParams', -'VideoActionRecognitionPredictionParams', -'VideoClassificationPredictionParams', -'VideoObjectTrackingPredictionParams', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_metadata.json b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_metadata.json deleted file mode 100644 index edfffb441b..0000000000 --- 
a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_metadata.json +++ /dev/null @@ -1,7 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.aiplatform.v1.schema.predict.params_v1", - "protoPackage": "google.cloud.aiplatform.v1.schema.predict.params", - "schema": "1.0" -} diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed deleted file mode 100644 index df96e61590..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1-schema-predict-params package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/services/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/services/__init__.py deleted file mode 100644 index 4de65971c2..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py deleted file mode 100644 index 70a92bb59c..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/__init__.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .image_classification import ( - ImageClassificationPredictionParams, -) -from .image_object_detection import ( - ImageObjectDetectionPredictionParams, -) -from .image_segmentation import ( - ImageSegmentationPredictionParams, -) -from .video_action_recognition import ( - VideoActionRecognitionPredictionParams, -) -from .video_classification import ( - VideoClassificationPredictionParams, -) -from .video_object_tracking import ( - VideoObjectTrackingPredictionParams, -) - -__all__ = ( - 'ImageClassificationPredictionParams', - 'ImageObjectDetectionPredictionParams', - 'ImageSegmentationPredictionParams', - 'VideoActionRecognitionPredictionParams', - 'VideoClassificationPredictionParams', - 'VideoObjectTrackingPredictionParams', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py deleted file mode 100644 index 
07324dfbb8..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_classification.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.params', - manifest={ - 'ImageClassificationPredictionParams', - }, -) - - -class ImageClassificationPredictionParams(proto.Message): - r"""Prediction model parameters for Image Classification. - - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. Default value is - 0.0 - max_predictions (int): - The Model only returns up to that many top, - by confidence score, predictions per instance. - If this number is very high, the Model may - return fewer predictions. Default value is 10. 
- """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py deleted file mode 100644 index b320119073..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_object_detection.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.params', - manifest={ - 'ImageObjectDetectionPredictionParams', - }, -) - - -class ImageObjectDetectionPredictionParams(proto.Message): - r"""Prediction model parameters for Image Object Detection. - - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. Default value is - 0.0 - max_predictions (int): - The Model only returns up to that many top, - by confidence score, predictions per instance. - Note that number of returned predictions is also - limited by metadata's predictionsLimit. Default - value is 10. 
- """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py deleted file mode 100644 index 504a61ec8f..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/image_segmentation.py +++ /dev/null @@ -1,45 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.params', - manifest={ - 'ImageSegmentationPredictionParams', - }, -) - - -class ImageSegmentationPredictionParams(proto.Message): - r"""Prediction model parameters for Image Segmentation. - - Attributes: - confidence_threshold (float): - When the model predicts category of pixels of - the image, it will only provide predictions for - pixels that it is at least this much confident - about. All other pixels will be classified as - background. Default value is 0.5. 
- """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py deleted file mode 100644 index d08f604b90..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_action_recognition.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.params', - manifest={ - 'VideoActionRecognitionPredictionParams', - }, -) - - -class VideoActionRecognitionPredictionParams(proto.Message): - r"""Prediction model parameters for Video Action Recognition. - - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. Default value is - 0.0 - max_predictions (int): - The model only returns up to that many top, - by confidence score, predictions per frame of - the video. If this number is very high, the - Model may return fewer predictions per frame. - Default value is 50. 
- """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py deleted file mode 100644 index 38dbc6eec1..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_classification.py +++ /dev/null @@ -1,95 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.params', - manifest={ - 'VideoClassificationPredictionParams', - }, -) - - -class VideoClassificationPredictionParams(proto.Message): - r"""Prediction model parameters for Video Classification. - - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. Default value is - 0.0 - max_predictions (int): - The Model only returns up to that many top, - by confidence score, predictions per instance. - If this number is very high, the Model may - return fewer predictions. Default value is - 10,000. - segment_classification (bool): - Set to true to request segment-level - classification. 
Vertex AI returns labels and - their confidence scores for the entire time - segment of the video that user specified in the - input instance. Default value is true - shot_classification (bool): - Set to true to request shot-level - classification. Vertex AI determines the - boundaries for each camera shot in the entire - time segment of the video that user specified in - the input instance. Vertex AI then returns - labels and their confidence scores for each - detected shot, along with the start and end time - of the shot. - WARNING: Model evaluation is not done for this - classification type, the quality of it depends - on the training data, but there are no metrics - provided to describe that quality. - Default value is false - one_sec_interval_classification (bool): - Set to true to request classification for a - video at one-second intervals. Vertex AI returns - labels and their confidence scores for each - second of the entire time segment of the video - that user specified in the input WARNING: Model - evaluation is not done for this classification - type, the quality of it depends on the training - data, but there are no metrics provided to - describe that quality. 
Default value is false - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - segment_classification = proto.Field( - proto.BOOL, - number=3, - ) - shot_classification = proto.Field( - proto.BOOL, - number=4, - ) - one_sec_interval_classification = proto.Field( - proto.BOOL, - number=5, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py deleted file mode 100644 index e9f1015d65..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/params_v1/types/video_object_tracking.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.params', - manifest={ - 'VideoObjectTrackingPredictionParams', - }, -) - - -class VideoObjectTrackingPredictionParams(proto.Message): - r"""Prediction model parameters for Video Object Tracking. - - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. 
Default value is - 0.0 - max_predictions (int): - The model only returns up to that many top, - by confidence score, predictions per frame of - the video. If this number is very high, the - Model may return fewer predictions per frame. - Default value is 50. - min_bounding_box_size (float): - Only bounding boxes with shortest edge at - least that long as a relative value of video - frame size are returned. Default value is 0.0. - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - min_bounding_box_size = proto.Field( - proto.FLOAT, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py deleted file mode 100644 index 27d9f97862..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py +++ /dev/null @@ -1,39 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - - -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.classification import ClassificationPredictionResult -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.image_object_detection import ImageObjectDetectionPredictionResult -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.image_segmentation import ImageSegmentationPredictionResult -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.tabular_classification import TabularClassificationPredictionResult -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.tabular_regression import TabularRegressionPredictionResult -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.text_extraction import TextExtractionPredictionResult -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.text_sentiment import TextSentimentPredictionResult -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.video_action_recognition import VideoActionRecognitionPredictionResult -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.video_classification import VideoClassificationPredictionResult -from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.video_object_tracking import VideoObjectTrackingPredictionResult - -__all__ = ('ClassificationPredictionResult', - 'ImageObjectDetectionPredictionResult', - 'ImageSegmentationPredictionResult', - 'TabularClassificationPredictionResult', - 'TabularRegressionPredictionResult', - 'TextExtractionPredictionResult', - 'TextSentimentPredictionResult', - 'VideoActionRecognitionPredictionResult', - 'VideoClassificationPredictionResult', - 'VideoObjectTrackingPredictionResult', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed deleted file mode 100644 index 472fa4d8cc..0000000000 --- 
a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1-schema-predict-prediction package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py deleted file mode 100644 index 3cf9304526..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - - -from .types.classification import ClassificationPredictionResult -from .types.image_object_detection import ImageObjectDetectionPredictionResult -from .types.image_segmentation import ImageSegmentationPredictionResult -from .types.tabular_classification import TabularClassificationPredictionResult -from .types.tabular_regression import TabularRegressionPredictionResult -from .types.text_extraction import TextExtractionPredictionResult -from .types.text_sentiment import TextSentimentPredictionResult -from .types.video_action_recognition import VideoActionRecognitionPredictionResult -from .types.video_classification import VideoClassificationPredictionResult -from .types.video_object_tracking import VideoObjectTrackingPredictionResult - -__all__ = ( -'ClassificationPredictionResult', -'ImageObjectDetectionPredictionResult', -'ImageSegmentationPredictionResult', -'TabularClassificationPredictionResult', -'TabularRegressionPredictionResult', -'TextExtractionPredictionResult', -'TextSentimentPredictionResult', -'VideoActionRecognitionPredictionResult', -'VideoClassificationPredictionResult', -'VideoObjectTrackingPredictionResult', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_metadata.json b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_metadata.json deleted file mode 100644 index ba1d67a00c..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_metadata.json +++ /dev/null @@ -1,7 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.aiplatform.v1.schema.predict.prediction_v1", - "protoPackage": "google.cloud.aiplatform.v1.schema.predict.prediction", - "schema": "1.0" -} diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed 
b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed deleted file mode 100644 index 472fa4d8cc..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1-schema-predict-prediction package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/services/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/services/__init__.py deleted file mode 100644 index 4de65971c2..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py deleted file mode 100644 index b7b7c056aa..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/__init__.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .classification import ( - ClassificationPredictionResult, -) -from .image_object_detection import ( - ImageObjectDetectionPredictionResult, -) -from .image_segmentation import ( - ImageSegmentationPredictionResult, -) -from .tabular_classification import ( - TabularClassificationPredictionResult, -) -from .tabular_regression import ( - TabularRegressionPredictionResult, -) -from .text_extraction import ( - TextExtractionPredictionResult, -) -from .text_sentiment import ( - TextSentimentPredictionResult, -) -from .video_action_recognition import ( - VideoActionRecognitionPredictionResult, -) -from .video_classification import ( - VideoClassificationPredictionResult, -) -from .video_object_tracking import ( - VideoObjectTrackingPredictionResult, -) - -__all__ = ( - 'ClassificationPredictionResult', - 'ImageObjectDetectionPredictionResult', - 'ImageSegmentationPredictionResult', - 'TabularClassificationPredictionResult', - 'TabularRegressionPredictionResult', - 'TextExtractionPredictionResult', - 'TextSentimentPredictionResult', - 'VideoActionRecognitionPredictionResult', - 'VideoClassificationPredictionResult', - 'VideoObjectTrackingPredictionResult', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py deleted file mode 100644 index 2b4bffa1ec..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/classification.py +++ /dev/null @@ -1,57 +0,0 @@ 
-# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.prediction', - manifest={ - 'ClassificationPredictionResult', - }, -) - - -class ClassificationPredictionResult(proto.Message): - r"""Prediction output format for Image and Text Classification. - - Attributes: - ids (Sequence[int]): - The resource IDs of the AnnotationSpecs that - had been identified. - display_names (Sequence[str]): - The display names of the AnnotationSpecs that - had been identified, order matches the IDs. - confidences (Sequence[float]): - The Model's confidences in correctness of the - predicted IDs, higher value means higher - confidence. Order matches the Ids. 
- """ - - ids = proto.RepeatedField( - proto.INT64, - number=1, - ) - display_names = proto.RepeatedField( - proto.STRING, - number=2, - ) - confidences = proto.RepeatedField( - proto.FLOAT, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py deleted file mode 100644 index 74029a3ad8..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_object_detection.py +++ /dev/null @@ -1,73 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import struct_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.prediction', - manifest={ - 'ImageObjectDetectionPredictionResult', - }, -) - - -class ImageObjectDetectionPredictionResult(proto.Message): - r"""Prediction output format for Image Object Detection. - - Attributes: - ids (Sequence[int]): - The resource IDs of the AnnotationSpecs that - had been identified, ordered by the confidence - score descendingly. - display_names (Sequence[str]): - The display names of the AnnotationSpecs that - had been identified, order matches the IDs. 
- confidences (Sequence[float]): - The Model's confidences in correctness of the - predicted IDs, higher value means higher - confidence. Order matches the Ids. - bboxes (Sequence[google.protobuf.struct_pb2.ListValue]): - Bounding boxes, i.e. the rectangles over the image, that - pinpoint the found AnnotationSpecs. Given in order that - matches the IDs. Each bounding box is an array of 4 numbers - ``xMin``, ``xMax``, ``yMin``, and ``yMax``, which represent - the extremal coordinates of the box. They are relative to - the image size, and the point 0,0 is in the top left of the - image. - """ - - ids = proto.RepeatedField( - proto.INT64, - number=1, - ) - display_names = proto.RepeatedField( - proto.STRING, - number=2, - ) - confidences = proto.RepeatedField( - proto.FLOAT, - number=3, - ) - bboxes = proto.RepeatedField( - proto.MESSAGE, - number=4, - message=struct_pb2.ListValue, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py deleted file mode 100644 index 263298a870..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/image_segmentation.py +++ /dev/null @@ -1,62 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.prediction', - manifest={ - 'ImageSegmentationPredictionResult', - }, -) - - -class ImageSegmentationPredictionResult(proto.Message): - r"""Prediction output format for Image Segmentation. - - Attributes: - category_mask (str): - A PNG image where each pixel in the mask - represents the category in which the pixel in - the original image was predicted to belong to. - The size of this image will be the same as the - original image. The mapping between the - AnntoationSpec and the color can be found in - model's metadata. The model will choose the most - likely category and if none of the categories - reach the confidence threshold, the pixel will - be marked as background. - confidence_mask (str): - A one channel image which is encoded as an - 8bit lossless PNG. The size of the image will be - the same as the original image. For a specific - pixel, darker color means less confidence in - correctness of the cateogry in the categoryMask - for the corresponding pixel. Black means no - confidence and white means complete confidence. - """ - - category_mask = proto.Field( - proto.STRING, - number=1, - ) - confidence_mask = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py deleted file mode 100644 index 5d5fbadd49..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_classification.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.prediction', - manifest={ - 'TabularClassificationPredictionResult', - }, -) - - -class TabularClassificationPredictionResult(proto.Message): - r"""Prediction output format for Tabular Classification. - - Attributes: - classes (Sequence[str]): - The name of the classes being classified, - contains all possible values of the target - column. - scores (Sequence[float]): - The model's confidence in each class being - correct, higher value means higher confidence. - The N-th score corresponds to the N-th class in - classes. - """ - - classes = proto.RepeatedField( - proto.STRING, - number=1, - ) - scores = proto.RepeatedField( - proto.FLOAT, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py deleted file mode 100644 index ad1b02facd..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/tabular_regression.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.prediction', - manifest={ - 'TabularRegressionPredictionResult', - }, -) - - -class TabularRegressionPredictionResult(proto.Message): - r"""Prediction output format for Tabular Regression. - - Attributes: - value (float): - The regression value. - lower_bound (float): - The lower bound of the prediction interval. - upper_bound (float): - The upper bound of the prediction interval. - """ - - value = proto.Field( - proto.FLOAT, - number=1, - ) - lower_bound = proto.Field( - proto.FLOAT, - number=2, - ) - upper_bound = proto.Field( - proto.FLOAT, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py deleted file mode 100644 index d546006a08..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_extraction.py +++ /dev/null @@ -1,78 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.prediction', - manifest={ - 'TextExtractionPredictionResult', - }, -) - - -class TextExtractionPredictionResult(proto.Message): - r"""Prediction output format for Text Extraction. - - Attributes: - ids (Sequence[int]): - The resource IDs of the AnnotationSpecs that - had been identified, ordered by the confidence - score descendingly. - display_names (Sequence[str]): - The display names of the AnnotationSpecs that - had been identified, order matches the IDs. - text_segment_start_offsets (Sequence[int]): - The start offsets, inclusive, of the text - segment in which the AnnotationSpec has been - identified. Expressed as a zero-based number of - characters as measured from the start of the - text snippet. - text_segment_end_offsets (Sequence[int]): - The end offsets, inclusive, of the text - segment in which the AnnotationSpec has been - identified. Expressed as a zero-based number of - characters as measured from the start of the - text snippet. - confidences (Sequence[float]): - The Model's confidences in correctness of the - predicted IDs, higher value means higher - confidence. Order matches the Ids. 
- """ - - ids = proto.RepeatedField( - proto.INT64, - number=1, - ) - display_names = proto.RepeatedField( - proto.STRING, - number=2, - ) - text_segment_start_offsets = proto.RepeatedField( - proto.INT64, - number=3, - ) - text_segment_end_offsets = proto.RepeatedField( - proto.INT64, - number=4, - ) - confidences = proto.RepeatedField( - proto.FLOAT, - number=5, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py deleted file mode 100644 index a1c615f6de..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/text_sentiment.py +++ /dev/null @@ -1,48 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.prediction', - manifest={ - 'TextSentimentPredictionResult', - }, -) - - -class TextSentimentPredictionResult(proto.Message): - r"""Prediction output format for Text Sentiment - - Attributes: - sentiment (int): - The integer sentiment labels between 0 - (inclusive) and sentimentMax label (inclusive), - while 0 maps to the least positive sentiment and - sentimentMax maps to the most positive one. 
The - higher the score is, the more positive the - sentiment in the text snippet is. Note: - sentimentMax is an integer value between 1 - (inclusive) and 10 (inclusive). - """ - - sentiment = proto.Field( - proto.INT32, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py deleted file mode 100644 index 035fc792ad..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_action_recognition.py +++ /dev/null @@ -1,85 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.prediction', - manifest={ - 'VideoActionRecognitionPredictionResult', - }, -) - - -class VideoActionRecognitionPredictionResult(proto.Message): - r"""Prediction output format for Video Action Recognition. - - Attributes: - id (str): - The resource ID of the AnnotationSpec that - had been identified. - display_name (str): - The display name of the AnnotationSpec that - had been identified. 
- time_segment_start (google.protobuf.duration_pb2.Duration): - The beginning, inclusive, of the video's time - segment in which the AnnotationSpec has been - identified. Expressed as a number of seconds as - measured from the start of the video, with - fractions up to a microsecond precision, and - with "s" appended at the end. - time_segment_end (google.protobuf.duration_pb2.Duration): - The end, exclusive, of the video's time - segment in which the AnnotationSpec has been - identified. Expressed as a number of seconds as - measured from the start of the video, with - fractions up to a microsecond precision, and - with "s" appended at the end. - confidence (google.protobuf.wrappers_pb2.FloatValue): - The Model's confidence in correction of this - prediction, higher value means higher - confidence. - """ - - id = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.MESSAGE, - number=4, - message=duration_pb2.Duration, - ) - time_segment_end = proto.Field( - proto.MESSAGE, - number=5, - message=duration_pb2.Duration, - ) - confidence = proto.Field( - proto.MESSAGE, - number=6, - message=wrappers_pb2.FloatValue, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py deleted file mode 100644 index ade084fc57..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_classification.py +++ /dev/null @@ -1,103 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.prediction', - manifest={ - 'VideoClassificationPredictionResult', - }, -) - - -class VideoClassificationPredictionResult(proto.Message): - r"""Prediction output format for Video Classification. - - Attributes: - id (str): - The resource ID of the AnnotationSpec that - had been identified. - display_name (str): - The display name of the AnnotationSpec that - had been identified. - type_ (str): - The type of the prediction. The requested - types can be configured via parameters. This - will be one of - segment-classification - - shot-classification - - one-sec-interval-classification - time_segment_start (google.protobuf.duration_pb2.Duration): - The beginning, inclusive, of the video's time - segment in which the AnnotationSpec has been - identified. Expressed as a number of seconds as - measured from the start of the video, with - fractions up to a microsecond precision, and - with "s" appended at the end. Note that for - 'segment-classification' prediction type, this - equals the original 'timeSegmentStart' from the - input instance, for other types it is the start - of a shot or a 1 second interval respectively. - time_segment_end (google.protobuf.duration_pb2.Duration): - The end, exclusive, of the video's time - segment in which the AnnotationSpec has been - identified. 
Expressed as a number of seconds as - measured from the start of the video, with - fractions up to a microsecond precision, and - with "s" appended at the end. Note that for - 'segment-classification' prediction type, this - equals the original 'timeSegmentEnd' from the - input instance, for other types it is the end of - a shot or a 1 second interval respectively. - confidence (google.protobuf.wrappers_pb2.FloatValue): - The Model's confidence in correction of this - prediction, higher value means higher - confidence. - """ - - id = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - type_ = proto.Field( - proto.STRING, - number=3, - ) - time_segment_start = proto.Field( - proto.MESSAGE, - number=4, - message=duration_pb2.Duration, - ) - time_segment_end = proto.Field( - proto.MESSAGE, - number=5, - message=duration_pb2.Duration, - ) - confidence = proto.Field( - proto.MESSAGE, - number=6, - message=wrappers_pb2.FloatValue, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py deleted file mode 100644 index 7c88f718e8..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/predict/prediction_v1/types/video_object_tracking.py +++ /dev/null @@ -1,145 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.predict.prediction', - manifest={ - 'VideoObjectTrackingPredictionResult', - }, -) - - -class VideoObjectTrackingPredictionResult(proto.Message): - r"""Prediction output format for Video Object Tracking. - - Attributes: - id (str): - The resource ID of the AnnotationSpec that - had been identified. - display_name (str): - The display name of the AnnotationSpec that - had been identified. - time_segment_start (google.protobuf.duration_pb2.Duration): - The beginning, inclusive, of the video's time - segment in which the object instance has been - detected. Expressed as a number of seconds as - measured from the start of the video, with - fractions up to a microsecond precision, and - with "s" appended at the end. - time_segment_end (google.protobuf.duration_pb2.Duration): - The end, inclusive, of the video's time - segment in which the object instance has been - detected. Expressed as a number of seconds as - measured from the start of the video, with - fractions up to a microsecond precision, and - with "s" appended at the end. - confidence (google.protobuf.wrappers_pb2.FloatValue): - The Model's confidence in correction of this - prediction, higher value means higher - confidence. - frames (Sequence[google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.VideoObjectTrackingPredictionResult.Frame]): - All of the frames of the video in which a - single object instance has been detected. The - bounding boxes in the frames identify the same - object. - """ - - class Frame(proto.Message): - r"""The fields ``xMin``, ``xMax``, ``yMin``, and ``yMax`` refer to a - bounding box, i.e. 
the rectangle over the video frame pinpointing - the found AnnotationSpec. The coordinates are relative to the frame - size, and the point 0,0 is in the top left of the frame. - - Attributes: - time_offset (google.protobuf.duration_pb2.Duration): - A time (frame) of a video in which the object - has been detected. Expressed as a number of - seconds as measured from the start of the video, - with fractions up to a microsecond precision, - and with "s" appended at the end. - x_min (google.protobuf.wrappers_pb2.FloatValue): - The leftmost coordinate of the bounding box. - x_max (google.protobuf.wrappers_pb2.FloatValue): - The rightmost coordinate of the bounding box. - y_min (google.protobuf.wrappers_pb2.FloatValue): - The topmost coordinate of the bounding box. - y_max (google.protobuf.wrappers_pb2.FloatValue): - The bottommost coordinate of the bounding - box. - """ - - time_offset = proto.Field( - proto.MESSAGE, - number=1, - message=duration_pb2.Duration, - ) - x_min = proto.Field( - proto.MESSAGE, - number=2, - message=wrappers_pb2.FloatValue, - ) - x_max = proto.Field( - proto.MESSAGE, - number=3, - message=wrappers_pb2.FloatValue, - ) - y_min = proto.Field( - proto.MESSAGE, - number=4, - message=wrappers_pb2.FloatValue, - ) - y_max = proto.Field( - proto.MESSAGE, - number=5, - message=wrappers_pb2.FloatValue, - ) - - id = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.MESSAGE, - number=3, - message=duration_pb2.Duration, - ) - time_segment_end = proto.Field( - proto.MESSAGE, - number=4, - message=duration_pb2.Duration, - ) - confidence = proto.Field( - proto.MESSAGE, - number=5, - message=wrappers_pb2.FloatValue, - ) - frames = proto.RepeatedField( - proto.MESSAGE, - number=6, - message=Frame, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py 
b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py deleted file mode 100644 index 0e86266695..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py +++ /dev/null @@ -1,69 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_classification import AutoMlImageClassification -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_classification import AutoMlImageClassificationInputs -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_classification import AutoMlImageClassificationMetadata -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_object_detection import AutoMlImageObjectDetection -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_object_detection import AutoMlImageObjectDetectionInputs -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_object_detection import AutoMlImageObjectDetectionMetadata -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_segmentation import AutoMlImageSegmentation -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_segmentation import AutoMlImageSegmentationInputs -from 
google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_segmentation import AutoMlImageSegmentationMetadata -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_tables import AutoMlTables -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_tables import AutoMlTablesInputs -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_tables import AutoMlTablesMetadata -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_classification import AutoMlTextClassification -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_classification import AutoMlTextClassificationInputs -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_extraction import AutoMlTextExtraction -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_extraction import AutoMlTextExtractionInputs -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_sentiment import AutoMlTextSentiment -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_sentiment import AutoMlTextSentimentInputs -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_action_recognition import AutoMlVideoActionRecognition -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_action_recognition import AutoMlVideoActionRecognitionInputs -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_classification import AutoMlVideoClassification -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_classification import AutoMlVideoClassificationInputs -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_object_tracking import AutoMlVideoObjectTracking -from 
google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_object_tracking import AutoMlVideoObjectTrackingInputs -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig - -__all__ = ('AutoMlImageClassification', - 'AutoMlImageClassificationInputs', - 'AutoMlImageClassificationMetadata', - 'AutoMlImageObjectDetection', - 'AutoMlImageObjectDetectionInputs', - 'AutoMlImageObjectDetectionMetadata', - 'AutoMlImageSegmentation', - 'AutoMlImageSegmentationInputs', - 'AutoMlImageSegmentationMetadata', - 'AutoMlTables', - 'AutoMlTablesInputs', - 'AutoMlTablesMetadata', - 'AutoMlTextClassification', - 'AutoMlTextClassificationInputs', - 'AutoMlTextExtraction', - 'AutoMlTextExtractionInputs', - 'AutoMlTextSentiment', - 'AutoMlTextSentimentInputs', - 'AutoMlVideoActionRecognition', - 'AutoMlVideoActionRecognitionInputs', - 'AutoMlVideoClassification', - 'AutoMlVideoClassificationInputs', - 'AutoMlVideoObjectTracking', - 'AutoMlVideoObjectTrackingInputs', - 'ExportEvaluatedDataItemsConfig', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/py.typed deleted file mode 100644 index 1a9d2972a0..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1-schema-trainingjob-definition package uses inline types. 
diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/__init__.py deleted file mode 100644 index f4e2447d46..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/__init__.py +++ /dev/null @@ -1,70 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -from .types.automl_image_classification import AutoMlImageClassification -from .types.automl_image_classification import AutoMlImageClassificationInputs -from .types.automl_image_classification import AutoMlImageClassificationMetadata -from .types.automl_image_object_detection import AutoMlImageObjectDetection -from .types.automl_image_object_detection import AutoMlImageObjectDetectionInputs -from .types.automl_image_object_detection import AutoMlImageObjectDetectionMetadata -from .types.automl_image_segmentation import AutoMlImageSegmentation -from .types.automl_image_segmentation import AutoMlImageSegmentationInputs -from .types.automl_image_segmentation import AutoMlImageSegmentationMetadata -from .types.automl_tables import AutoMlTables -from .types.automl_tables import AutoMlTablesInputs -from .types.automl_tables import AutoMlTablesMetadata -from .types.automl_text_classification import AutoMlTextClassification -from .types.automl_text_classification import 
AutoMlTextClassificationInputs -from .types.automl_text_extraction import AutoMlTextExtraction -from .types.automl_text_extraction import AutoMlTextExtractionInputs -from .types.automl_text_sentiment import AutoMlTextSentiment -from .types.automl_text_sentiment import AutoMlTextSentimentInputs -from .types.automl_video_action_recognition import AutoMlVideoActionRecognition -from .types.automl_video_action_recognition import AutoMlVideoActionRecognitionInputs -from .types.automl_video_classification import AutoMlVideoClassification -from .types.automl_video_classification import AutoMlVideoClassificationInputs -from .types.automl_video_object_tracking import AutoMlVideoObjectTracking -from .types.automl_video_object_tracking import AutoMlVideoObjectTrackingInputs -from .types.export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig - -__all__ = ( -'AutoMlImageClassification', -'AutoMlImageClassificationInputs', -'AutoMlImageClassificationMetadata', -'AutoMlImageObjectDetection', -'AutoMlImageObjectDetectionInputs', -'AutoMlImageObjectDetectionMetadata', -'AutoMlImageSegmentation', -'AutoMlImageSegmentationInputs', -'AutoMlImageSegmentationMetadata', -'AutoMlTables', -'AutoMlTablesInputs', -'AutoMlTablesMetadata', -'AutoMlTextClassification', -'AutoMlTextClassificationInputs', -'AutoMlTextExtraction', -'AutoMlTextExtractionInputs', -'AutoMlTextSentiment', -'AutoMlTextSentimentInputs', -'AutoMlVideoActionRecognition', -'AutoMlVideoActionRecognitionInputs', -'AutoMlVideoClassification', -'AutoMlVideoClassificationInputs', -'AutoMlVideoObjectTracking', -'AutoMlVideoObjectTrackingInputs', -'ExportEvaluatedDataItemsConfig', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_metadata.json b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_metadata.json deleted file mode 100644 index 620ff75f05..0000000000 --- 
a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_metadata.json +++ /dev/null @@ -1,7 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.aiplatform.v1.schema.trainingjob.definition_v1", - "protoPackage": "google.cloud.aiplatform.v1.schema.trainingjob.definition", - "schema": "1.0" -} diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/py.typed deleted file mode 100644 index 1a9d2972a0..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1-schema-trainingjob-definition package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/services/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/services/__init__.py deleted file mode 100644 index 4de65971c2..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/__init__.py deleted file mode 100644 index 4b8bb9425b..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/__init__.py +++ /dev/null @@ -1,90 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .automl_image_classification import ( - AutoMlImageClassification, - AutoMlImageClassificationInputs, - AutoMlImageClassificationMetadata, -) -from .automl_image_object_detection import ( - AutoMlImageObjectDetection, - AutoMlImageObjectDetectionInputs, - AutoMlImageObjectDetectionMetadata, -) -from .automl_image_segmentation import ( - AutoMlImageSegmentation, - AutoMlImageSegmentationInputs, - AutoMlImageSegmentationMetadata, -) -from .automl_tables import ( - AutoMlTables, - AutoMlTablesInputs, - AutoMlTablesMetadata, -) -from .automl_text_classification import ( - AutoMlTextClassification, - AutoMlTextClassificationInputs, -) -from .automl_text_extraction import ( - AutoMlTextExtraction, - AutoMlTextExtractionInputs, -) -from .automl_text_sentiment import ( - AutoMlTextSentiment, - AutoMlTextSentimentInputs, -) -from .automl_video_action_recognition import ( - AutoMlVideoActionRecognition, - AutoMlVideoActionRecognitionInputs, -) -from .automl_video_classification import ( - AutoMlVideoClassification, - AutoMlVideoClassificationInputs, -) -from .automl_video_object_tracking import ( - AutoMlVideoObjectTracking, - AutoMlVideoObjectTrackingInputs, -) -from .export_evaluated_data_items_config import ( - ExportEvaluatedDataItemsConfig, -) - -__all__ = ( - 'AutoMlImageClassification', - 'AutoMlImageClassificationInputs', - 'AutoMlImageClassificationMetadata', - 'AutoMlImageObjectDetection', - 'AutoMlImageObjectDetectionInputs', - 'AutoMlImageObjectDetectionMetadata', - 'AutoMlImageSegmentation', - 'AutoMlImageSegmentationInputs', - 'AutoMlImageSegmentationMetadata', - 'AutoMlTables', - 'AutoMlTablesInputs', - 'AutoMlTablesMetadata', - 'AutoMlTextClassification', - 'AutoMlTextClassificationInputs', - 'AutoMlTextExtraction', - 'AutoMlTextExtractionInputs', - 'AutoMlTextSentiment', - 'AutoMlTextSentimentInputs', - 'AutoMlVideoActionRecognition', - 'AutoMlVideoActionRecognitionInputs', - 'AutoMlVideoClassification', - 'AutoMlVideoClassificationInputs', - 
'AutoMlVideoObjectTracking', - 'AutoMlVideoObjectTrackingInputs', - 'ExportEvaluatedDataItemsConfig', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_classification.py deleted file mode 100644 index 99c8e77b1a..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_classification.py +++ /dev/null @@ -1,158 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.trainingjob.definition', - manifest={ - 'AutoMlImageClassification', - 'AutoMlImageClassificationInputs', - 'AutoMlImageClassificationMetadata', - }, -) - - -class AutoMlImageClassification(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Image - Classification Model. - - Attributes: - inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageClassificationInputs): - The input parameters of this TrainingJob. - metadata (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageClassificationMetadata): - The metadata information. 
- """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlImageClassificationInputs', - ) - metadata = proto.Field( - proto.MESSAGE, - number=2, - message='AutoMlImageClassificationMetadata', - ) - - -class AutoMlImageClassificationInputs(proto.Message): - r""" - - Attributes: - model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageClassificationInputs.ModelType): - - base_model_id (str): - The ID of the ``base`` model. If it is specified, the new - model will be trained based on the ``base`` model. - Otherwise, the new model will be trained from scratch. The - ``base`` model must be in the same Project and Location as - the new Model to train, and have the same modelType. - budget_milli_node_hours (int): - The training budget of creating this model, expressed in - milli node hours i.e. 1,000 value in this field means 1 node - hour. The actual metadata.costMilliNodeHours will be equal - or less than this value. If further model training ceases to - provide any improvements, it will stop without using the - full budget and the metadata.successfulStopReason will be - ``model-converged``. Note, node_hour = actual_hour \* - number_of_nodes_involved. For modelType - ``cloud``\ (default), the budget must be between 8,000 and - 800,000 milli node hours, inclusive. The default value is - 192,000 which represents one day in wall time, considering 8 - nodes are used. For model types ``mobile-tf-low-latency-1``, - ``mobile-tf-versatile-1``, ``mobile-tf-high-accuracy-1``, - the training budget must be between 1,000 and 100,000 milli - node hours, inclusive. The default value is 24,000 which - represents one day in wall time on a single node that is - used. - disable_early_stopping (bool): - Use the entire training budget. This disables - the early stopping feature. 
When false the early - stopping feature is enabled, which means that - AutoML Image Classification might stop training - before the entire training budget has been used. - multi_label (bool): - If false, a single-label (multi-class) Model - will be trained (i.e. assuming that for each - image just up to one annotation may be - applicable). If true, a multi-label Model will - be trained (i.e. assuming that for each image - multiple annotations may be applicable). - """ - class ModelType(proto.Enum): - r"""""" - MODEL_TYPE_UNSPECIFIED = 0 - CLOUD = 1 - MOBILE_TF_LOW_LATENCY_1 = 2 - MOBILE_TF_VERSATILE_1 = 3 - MOBILE_TF_HIGH_ACCURACY_1 = 4 - - model_type = proto.Field( - proto.ENUM, - number=1, - enum=ModelType, - ) - base_model_id = proto.Field( - proto.STRING, - number=2, - ) - budget_milli_node_hours = proto.Field( - proto.INT64, - number=3, - ) - disable_early_stopping = proto.Field( - proto.BOOL, - number=4, - ) - multi_label = proto.Field( - proto.BOOL, - number=5, - ) - - -class AutoMlImageClassificationMetadata(proto.Message): - r""" - - Attributes: - cost_milli_node_hours (int): - The actual training cost of creating this - model, expressed in milli node hours, i.e. 1,000 - value in this field means 1 node hour. - Guaranteed to not exceed - inputs.budgetMilliNodeHours. - successful_stop_reason (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageClassificationMetadata.SuccessfulStopReason): - For successful job completions, this is the - reason why the job has finished. 
- """ - class SuccessfulStopReason(proto.Enum): - r"""""" - SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 - BUDGET_REACHED = 1 - MODEL_CONVERGED = 2 - - cost_milli_node_hours = proto.Field( - proto.INT64, - number=1, - ) - successful_stop_reason = proto.Field( - proto.ENUM, - number=2, - enum=SuccessfulStopReason, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py deleted file mode 100644 index 79989ed941..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_object_detection.py +++ /dev/null @@ -1,139 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.trainingjob.definition', - manifest={ - 'AutoMlImageObjectDetection', - 'AutoMlImageObjectDetectionInputs', - 'AutoMlImageObjectDetectionMetadata', - }, -) - - -class AutoMlImageObjectDetection(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Image Object - Detection Model. 
- - Attributes: - inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageObjectDetectionInputs): - The input parameters of this TrainingJob. - metadata (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageObjectDetectionMetadata): - The metadata information - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlImageObjectDetectionInputs', - ) - metadata = proto.Field( - proto.MESSAGE, - number=2, - message='AutoMlImageObjectDetectionMetadata', - ) - - -class AutoMlImageObjectDetectionInputs(proto.Message): - r""" - - Attributes: - model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageObjectDetectionInputs.ModelType): - - budget_milli_node_hours (int): - The training budget of creating this model, expressed in - milli node hours i.e. 1,000 value in this field means 1 node - hour. The actual metadata.costMilliNodeHours will be equal - or less than this value. If further model training ceases to - provide any improvements, it will stop without using the - full budget and the metadata.successfulStopReason will be - ``model-converged``. Note, node_hour = actual_hour \* - number_of_nodes_involved. For modelType - ``cloud``\ (default), the budget must be between 20,000 and - 900,000 milli node hours, inclusive. The default value is - 216,000 which represents one day in wall time, considering 9 - nodes are used. For model types ``mobile-tf-low-latency-1``, - ``mobile-tf-versatile-1``, ``mobile-tf-high-accuracy-1`` the - training budget must be between 1,000 and 100,000 milli node - hours, inclusive. The default value is 24,000 which - represents one day in wall time on a single node that is - used. - disable_early_stopping (bool): - Use the entire training budget. This disables - the early stopping feature. 
When false the early - stopping feature is enabled, which means that - AutoML Image Object Detection might stop - training before the entire training budget has - been used. - """ - class ModelType(proto.Enum): - r"""""" - MODEL_TYPE_UNSPECIFIED = 0 - CLOUD_HIGH_ACCURACY_1 = 1 - CLOUD_LOW_LATENCY_1 = 2 - MOBILE_TF_LOW_LATENCY_1 = 3 - MOBILE_TF_VERSATILE_1 = 4 - MOBILE_TF_HIGH_ACCURACY_1 = 5 - - model_type = proto.Field( - proto.ENUM, - number=1, - enum=ModelType, - ) - budget_milli_node_hours = proto.Field( - proto.INT64, - number=2, - ) - disable_early_stopping = proto.Field( - proto.BOOL, - number=3, - ) - - -class AutoMlImageObjectDetectionMetadata(proto.Message): - r""" - - Attributes: - cost_milli_node_hours (int): - The actual training cost of creating this - model, expressed in milli node hours, i.e. 1,000 - value in this field means 1 node hour. - Guaranteed to not exceed - inputs.budgetMilliNodeHours. - successful_stop_reason (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageObjectDetectionMetadata.SuccessfulStopReason): - For successful job completions, this is the - reason why the job has finished. 
- """ - class SuccessfulStopReason(proto.Enum): - r"""""" - SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 - BUDGET_REACHED = 1 - MODEL_CONVERGED = 2 - - cost_milli_node_hours = proto.Field( - proto.INT64, - number=1, - ) - successful_stop_reason = proto.Field( - proto.ENUM, - number=2, - enum=SuccessfulStopReason, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_segmentation.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_segmentation.py deleted file mode 100644 index 233fdb9da8..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_image_segmentation.py +++ /dev/null @@ -1,133 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.trainingjob.definition', - manifest={ - 'AutoMlImageSegmentation', - 'AutoMlImageSegmentationInputs', - 'AutoMlImageSegmentationMetadata', - }, -) - - -class AutoMlImageSegmentation(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Image - Segmentation Model. - - Attributes: - inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageSegmentationInputs): - The input parameters of this TrainingJob. 
- metadata (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageSegmentationMetadata): - The metadata information. - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlImageSegmentationInputs', - ) - metadata = proto.Field( - proto.MESSAGE, - number=2, - message='AutoMlImageSegmentationMetadata', - ) - - -class AutoMlImageSegmentationInputs(proto.Message): - r""" - - Attributes: - model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageSegmentationInputs.ModelType): - - budget_milli_node_hours (int): - The training budget of creating this model, expressed in - milli node hours i.e. 1,000 value in this field means 1 node - hour. The actual metadata.costMilliNodeHours will be equal - or less than this value. If further model training ceases to - provide any improvements, it will stop without using the - full budget and the metadata.successfulStopReason will be - ``model-converged``. Note, node_hour = actual_hour \* - number_of_nodes_involved. Or actaul_wall_clock_hours = - train_budget_milli_node_hours / (number_of_nodes_involved \* - 1000) For modelType ``cloud-high-accuracy-1``\ (default), - the budget must be between 20,000 and 2,000,000 milli node - hours, inclusive. The default value is 192,000 which - represents one day in wall time (1000 milli \* 24 hours \* 8 - nodes). - base_model_id (str): - The ID of the ``base`` model. If it is specified, the new - model will be trained based on the ``base`` model. - Otherwise, the new model will be trained from scratch. The - ``base`` model must be in the same Project and Location as - the new Model to train, and have the same modelType. 
- """ - class ModelType(proto.Enum): - r"""""" - MODEL_TYPE_UNSPECIFIED = 0 - CLOUD_HIGH_ACCURACY_1 = 1 - CLOUD_LOW_ACCURACY_1 = 2 - MOBILE_TF_LOW_LATENCY_1 = 3 - - model_type = proto.Field( - proto.ENUM, - number=1, - enum=ModelType, - ) - budget_milli_node_hours = proto.Field( - proto.INT64, - number=2, - ) - base_model_id = proto.Field( - proto.STRING, - number=3, - ) - - -class AutoMlImageSegmentationMetadata(proto.Message): - r""" - - Attributes: - cost_milli_node_hours (int): - The actual training cost of creating this - model, expressed in milli node hours, i.e. 1,000 - value in this field means 1 node hour. - Guaranteed to not exceed - inputs.budgetMilliNodeHours. - successful_stop_reason (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlImageSegmentationMetadata.SuccessfulStopReason): - For successful job completions, this is the - reason why the job has finished. - """ - class SuccessfulStopReason(proto.Enum): - r"""""" - SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 - BUDGET_REACHED = 1 - MODEL_CONVERGED = 2 - - cost_milli_node_hours = proto.Field( - proto.INT64, - number=1, - ) - successful_stop_reason = proto.Field( - proto.ENUM, - number=2, - enum=SuccessfulStopReason, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py deleted file mode 100644 index 51baaea115..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_tables.py +++ /dev/null @@ -1,529 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types import export_evaluated_data_items_config as gcastd_export_evaluated_data_items_config - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.trainingjob.definition', - manifest={ - 'AutoMlTables', - 'AutoMlTablesInputs', - 'AutoMlTablesMetadata', - }, -) - - -class AutoMlTables(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Tables Model. - - Attributes: - inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs): - The input parameters of this TrainingJob. - metadata (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesMetadata): - The metadata information. - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlTablesInputs', - ) - metadata = proto.Field( - proto.MESSAGE, - number=2, - message='AutoMlTablesMetadata', - ) - - -class AutoMlTablesInputs(proto.Message): - r""" - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - optimization_objective_recall_value (float): - Required when optimization_objective is - "maximize-precision-at-recall". Must be between 0 and 1, - inclusive. 
- - This field is a member of `oneof`_ ``additional_optimization_objective_config``. - optimization_objective_precision_value (float): - Required when optimization_objective is - "maximize-recall-at-precision". Must be between 0 and 1, - inclusive. - - This field is a member of `oneof`_ ``additional_optimization_objective_config``. - prediction_type (str): - The type of prediction the Model is to - produce. "classification" - Predict one out of - multiple target values is - picked for each row. - "regression" - Predict a value based on its - relation to other values. This - type is available only to columns that contain - semantically numeric values, i.e. integers or - floating point number, even if - stored as e.g. strings. - target_column (str): - The column name of the target column that the - model is to predict. - transformations (Sequence[google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation]): - Each transformation will apply transform - function to given input column. And the result - will be used for training. When creating - transformation for BigQuery Struct column, the - column should be flattened using "." as the - delimiter. - optimization_objective (str): - Objective function the model is optimizing - towards. The training process creates a model - that maximizes/minimizes the value of the - objective function over the validation set. - - The supported optimization objectives depend on - the prediction type. If the field is not set, a - default objective function is used. - classification (binary): - "maximize-au-roc" (default) - Maximize the - area under the receiver - operating characteristic (ROC) curve. - "minimize-log-loss" - Minimize log loss. - "maximize-au-prc" - Maximize the area under - the precision-recall curve. "maximize- - precision-at-recall" - Maximize precision for a - specified - recall value. "maximize-recall-at-precision" - - Maximize recall for a specified - precision value. 
- classification (multi-class): - "minimize-log-loss" (default) - Minimize log - loss. - regression: - "minimize-rmse" (default) - Minimize root- - mean-squared error (RMSE). "minimize-mae" - - Minimize mean-absolute error (MAE). "minimize- - rmsle" - Minimize root-mean-squared log error - (RMSLE). - train_budget_milli_node_hours (int): - Required. The train budget of creating this - model, expressed in milli node hours i.e. 1,000 - value in this field means 1 node hour. - The training cost of the model will not exceed - this budget. The final cost will be attempted to - be close to the budget, though may end up being - (even) noticeably smaller - at the backend's - discretion. This especially may happen when - further model training ceases to provide any - improvements. - If the budget is set to a value known to be - insufficient to train a model for the given - dataset, the training won't be attempted and - will error. - - The train budget must be between 1,000 and - 72,000 milli node hours, inclusive. - disable_early_stopping (bool): - Use the entire training budget. This disables - the early stopping feature. By default, the - early stopping feature is enabled, which means - that AutoML Tables might stop training before - the entire training budget has been used. - weight_column_name (str): - Column name that should be used as the weight - column. Higher values in this column give more - importance to the row during model training. The - column must have numeric values between 0 and - 10000 inclusively; 0 means the row is ignored - for training. If weight column field is not set, - then all rows are assumed to have equal weight - of 1. - export_evaluated_data_items_config (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.ExportEvaluatedDataItemsConfig): - Configuration for exporting test set - predictions to a BigQuery table. If this - configuration is absent, then the export is not - performed. 
- additional_experiments (Sequence[str]): - Additional experiment flags for the Tables - training pipeline. - """ - - class Transformation(proto.Message): - r""" - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - auto (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.AutoTransformation): - - This field is a member of `oneof`_ ``transformation_detail``. - numeric (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.NumericTransformation): - - This field is a member of `oneof`_ ``transformation_detail``. - categorical (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.CategoricalTransformation): - - This field is a member of `oneof`_ ``transformation_detail``. - timestamp (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.TimestampTransformation): - - This field is a member of `oneof`_ ``transformation_detail``. - text (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.TextTransformation): - - This field is a member of `oneof`_ ``transformation_detail``. - repeated_numeric (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.NumericArrayTransformation): - - This field is a member of `oneof`_ ``transformation_detail``. - repeated_categorical (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.CategoricalArrayTransformation): - - This field is a member of `oneof`_ ``transformation_detail``. 
- repeated_text (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTablesInputs.Transformation.TextArrayTransformation): - - This field is a member of `oneof`_ ``transformation_detail``. - """ - - class AutoTransformation(proto.Message): - r"""Training pipeline will infer the proper transformation based - on the statistic of dataset. - - Attributes: - column_name (str): - - """ - - column_name = proto.Field( - proto.STRING, - number=1, - ) - - class NumericTransformation(proto.Message): - r"""Training pipeline will perform following transformation functions. - - - The value converted to float32. - - The z_score of the value. - - log(value+1) when the value is greater than or equal to 0. - Otherwise, this transformation is not applied and the value is - considered a missing value. - - z_score of log(value+1) when the value is greater than or equal - to 0. Otherwise, this transformation is not applied and the value - is considered a missing value. - - A boolean value that indicates whether the value is valid. - - Attributes: - column_name (str): - - invalid_values_allowed (bool): - If invalid values is allowed, the training - pipeline will create a boolean feature that - indicated whether the value is valid. Otherwise, - the training pipeline will discard the input row - from trainining data. - """ - - column_name = proto.Field( - proto.STRING, - number=1, - ) - invalid_values_allowed = proto.Field( - proto.BOOL, - number=2, - ) - - class CategoricalTransformation(proto.Message): - r"""Training pipeline will perform following transformation functions. - - - The categorical string as is--no change to case, punctuation, - spelling, tense, and so on. - - Convert the category name to a dictionary lookup index and - generate an embedding for each index. - - Categories that appear less than 5 times in the training dataset - are treated as the "unknown" category. The "unknown" category - gets its own special lookup index and resulting embedding. 
- - Attributes: - column_name (str): - - """ - - column_name = proto.Field( - proto.STRING, - number=1, - ) - - class TimestampTransformation(proto.Message): - r"""Training pipeline will perform following transformation functions. - - - Apply the transformation functions for Numerical columns. - - Determine the year, month, day,and weekday. Treat each value from - the - - timestamp as a Categorical column. - - Invalid numerical values (for example, values that fall outside - of a typical timestamp range, or are extreme values) receive no - special treatment and are not removed. - - Attributes: - column_name (str): - - time_format (str): - The format in which that time field is expressed. The - time_format must either be one of: - - - ``unix-seconds`` - - ``unix-milliseconds`` - - ``unix-microseconds`` - - ``unix-nanoseconds`` (for respectively number of seconds, - milliseconds, microseconds and nanoseconds since start of - the Unix epoch); or be written in ``strftime`` syntax. If - time_format is not set, then the default format is RFC - 3339 ``date-time`` format, where ``time-offset`` = - ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z) - invalid_values_allowed (bool): - If invalid values is allowed, the training - pipeline will create a boolean feature that - indicated whether the value is valid. Otherwise, - the training pipeline will discard the input row - from trainining data. - """ - - column_name = proto.Field( - proto.STRING, - number=1, - ) - time_format = proto.Field( - proto.STRING, - number=2, - ) - invalid_values_allowed = proto.Field( - proto.BOOL, - number=3, - ) - - class TextTransformation(proto.Message): - r"""Training pipeline will perform following transformation functions. - - - The text as is--no change to case, punctuation, spelling, tense, - and so on. - - Tokenize text to words. Convert each words to a dictionary lookup - index and generate an embedding for each index. Combine the - embedding of all elements into a single embedding using the mean. 
- - Tokenization is based on unicode script boundaries. - - Missing values get their own lookup index and resulting - embedding. - - Stop-words receive no special treatment and are not removed. - - Attributes: - column_name (str): - - """ - - column_name = proto.Field( - proto.STRING, - number=1, - ) - - class NumericArrayTransformation(proto.Message): - r"""Treats the column as numerical array and performs following - transformation functions. - - - All transformations for Numerical types applied to the average of - the all elements. - - The average of empty arrays is treated as zero. - - Attributes: - column_name (str): - - invalid_values_allowed (bool): - If invalid values is allowed, the training - pipeline will create a boolean feature that - indicated whether the value is valid. Otherwise, - the training pipeline will discard the input row - from trainining data. - """ - - column_name = proto.Field( - proto.STRING, - number=1, - ) - invalid_values_allowed = proto.Field( - proto.BOOL, - number=2, - ) - - class CategoricalArrayTransformation(proto.Message): - r"""Treats the column as categorical array and performs following - transformation functions. - - - For each element in the array, convert the category name to a - dictionary lookup index and generate an embedding for each index. - Combine the embedding of all elements into a single embedding - using the mean. - - Empty arrays treated as an embedding of zeroes. - - Attributes: - column_name (str): - - """ - - column_name = proto.Field( - proto.STRING, - number=1, - ) - - class TextArrayTransformation(proto.Message): - r"""Treats the column as text array and performs following - transformation functions. - - - Concatenate all text values in the array into a single text value - using a space (" ") as a delimiter, and then treat the result as - a single text value. Apply the transformations for Text columns. - - Empty arrays treated as an empty text. 
- - Attributes: - column_name (str): - - """ - - column_name = proto.Field( - proto.STRING, - number=1, - ) - - auto = proto.Field( - proto.MESSAGE, - number=1, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.AutoTransformation', - ) - numeric = proto.Field( - proto.MESSAGE, - number=2, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.NumericTransformation', - ) - categorical = proto.Field( - proto.MESSAGE, - number=3, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.CategoricalTransformation', - ) - timestamp = proto.Field( - proto.MESSAGE, - number=4, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.TimestampTransformation', - ) - text = proto.Field( - proto.MESSAGE, - number=5, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.TextTransformation', - ) - repeated_numeric = proto.Field( - proto.MESSAGE, - number=6, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.NumericArrayTransformation', - ) - repeated_categorical = proto.Field( - proto.MESSAGE, - number=7, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.CategoricalArrayTransformation', - ) - repeated_text = proto.Field( - proto.MESSAGE, - number=8, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.TextArrayTransformation', - ) - - optimization_objective_recall_value = proto.Field( - proto.FLOAT, - number=5, - oneof='additional_optimization_objective_config', - ) - optimization_objective_precision_value = proto.Field( - proto.FLOAT, - number=6, - oneof='additional_optimization_objective_config', - ) - prediction_type = proto.Field( - proto.STRING, - number=1, - ) - target_column = proto.Field( - proto.STRING, - number=2, - ) - transformations = proto.RepeatedField( - proto.MESSAGE, - number=3, - message=Transformation, - ) - optimization_objective = proto.Field( - proto.STRING, - 
number=4, - ) - train_budget_milli_node_hours = proto.Field( - proto.INT64, - number=7, - ) - disable_early_stopping = proto.Field( - proto.BOOL, - number=8, - ) - weight_column_name = proto.Field( - proto.STRING, - number=9, - ) - export_evaluated_data_items_config = proto.Field( - proto.MESSAGE, - number=10, - message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig, - ) - additional_experiments = proto.RepeatedField( - proto.STRING, - number=11, - ) - - -class AutoMlTablesMetadata(proto.Message): - r"""Model metadata specific to AutoML Tables. - - Attributes: - train_cost_milli_node_hours (int): - Output only. The actual training cost of the - model, expressed in milli node hours, i.e. 1,000 - value in this field means 1 node hour. - Guaranteed to not exceed the train budget. - """ - - train_cost_milli_node_hours = proto.Field( - proto.INT64, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_classification.py deleted file mode 100644 index cfd3dde025..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_classification.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.trainingjob.definition', - manifest={ - 'AutoMlTextClassification', - 'AutoMlTextClassificationInputs', - }, -) - - -class AutoMlTextClassification(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Text - Classification Model. - - Attributes: - inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTextClassificationInputs): - The input parameters of this TrainingJob. - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlTextClassificationInputs', - ) - - -class AutoMlTextClassificationInputs(proto.Message): - r""" - - Attributes: - multi_label (bool): - - """ - - multi_label = proto.Field( - proto.BOOL, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_extraction.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_extraction.py deleted file mode 100644 index 4ba0b6f10b..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_extraction.py +++ /dev/null @@ -1,49 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.trainingjob.definition', - manifest={ - 'AutoMlTextExtraction', - 'AutoMlTextExtractionInputs', - }, -) - - -class AutoMlTextExtraction(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Text - Extraction Model. - - Attributes: - inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTextExtractionInputs): - The input parameters of this TrainingJob. - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlTextExtractionInputs', - ) - - -class AutoMlTextExtractionInputs(proto.Message): - r""" - """ - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_sentiment.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_sentiment.py deleted file mode 100644 index 7ca24f7ebd..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_text_sentiment.py +++ /dev/null @@ -1,67 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.trainingjob.definition', - manifest={ - 'AutoMlTextSentiment', - 'AutoMlTextSentimentInputs', - }, -) - - -class AutoMlTextSentiment(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Text - Sentiment Model. - - Attributes: - inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlTextSentimentInputs): - The input parameters of this TrainingJob. - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlTextSentimentInputs', - ) - - -class AutoMlTextSentimentInputs(proto.Message): - r""" - - Attributes: - sentiment_max (int): - A sentiment is expressed as an integer - ordinal, where higher value means a more - positive sentiment. The range of sentiments that - will be used is between 0 and sentimentMax - (inclusive on both ends), and all the values in - the range must be represented in the dataset - before a model can be created. - Only the Annotations with this sentimentMax will - be used for training. sentimentMax value must be - between 1 and 10 (inclusive). - """ - - sentiment_max = proto.Field( - proto.INT32, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_action_recognition.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_action_recognition.py deleted file mode 100644 index d7640dc1d5..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_action_recognition.py +++ /dev/null @@ -1,66 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.trainingjob.definition', - manifest={ - 'AutoMlVideoActionRecognition', - 'AutoMlVideoActionRecognitionInputs', - }, -) - - -class AutoMlVideoActionRecognition(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Video Action - Recognition Model. - - Attributes: - inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoActionRecognitionInputs): - The input parameters of this TrainingJob. - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlVideoActionRecognitionInputs', - ) - - -class AutoMlVideoActionRecognitionInputs(proto.Message): - r""" - - Attributes: - model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoActionRecognitionInputs.ModelType): - - """ - class ModelType(proto.Enum): - r"""""" - MODEL_TYPE_UNSPECIFIED = 0 - CLOUD = 1 - MOBILE_VERSATILE_1 = 2 - MOBILE_JETSON_VERSATILE_1 = 3 - MOBILE_CORAL_VERSATILE_1 = 4 - - model_type = proto.Field( - proto.ENUM, - number=1, - enum=ModelType, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_classification.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_classification.py deleted file mode 100644 index 2a9317b2de..0000000000 --- 
a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_classification.py +++ /dev/null @@ -1,65 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.trainingjob.definition', - manifest={ - 'AutoMlVideoClassification', - 'AutoMlVideoClassificationInputs', - }, -) - - -class AutoMlVideoClassification(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Video - Classification Model. - - Attributes: - inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoClassificationInputs): - The input parameters of this TrainingJob. 
- """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlVideoClassificationInputs', - ) - - -class AutoMlVideoClassificationInputs(proto.Message): - r""" - - Attributes: - model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoClassificationInputs.ModelType): - - """ - class ModelType(proto.Enum): - r"""""" - MODEL_TYPE_UNSPECIFIED = 0 - CLOUD = 1 - MOBILE_VERSATILE_1 = 2 - MOBILE_JETSON_VERSATILE_1 = 3 - - model_type = proto.Field( - proto.ENUM, - number=1, - enum=ModelType, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_object_tracking.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_object_tracking.py deleted file mode 100644 index 821a7cd197..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/automl_video_object_tracking.py +++ /dev/null @@ -1,68 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.trainingjob.definition', - manifest={ - 'AutoMlVideoObjectTracking', - 'AutoMlVideoObjectTrackingInputs', - }, -) - - -class AutoMlVideoObjectTracking(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Video - ObjectTracking Model. - - Attributes: - inputs (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoObjectTrackingInputs): - The input parameters of this TrainingJob. - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlVideoObjectTrackingInputs', - ) - - -class AutoMlVideoObjectTrackingInputs(proto.Message): - r""" - - Attributes: - model_type (google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.AutoMlVideoObjectTrackingInputs.ModelType): - - """ - class ModelType(proto.Enum): - r"""""" - MODEL_TYPE_UNSPECIFIED = 0 - CLOUD = 1 - MOBILE_VERSATILE_1 = 2 - MOBILE_CORAL_VERSATILE_1 = 3 - MOBILE_CORAL_LOW_LATENCY_1 = 4 - MOBILE_JETSON_VERSATILE_1 = 5 - MOBILE_JETSON_LOW_LATENCY_1 = 6 - - model_type = proto.Field( - proto.ENUM, - number=1, - enum=ModelType, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/export_evaluated_data_items_config.py b/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/export_evaluated_data_items_config.py deleted file mode 100644 index 8ac69278ee..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/types/export_evaluated_data_items_config.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1.schema.trainingjob.definition', - manifest={ - 'ExportEvaluatedDataItemsConfig', - }, -) - - -class ExportEvaluatedDataItemsConfig(proto.Message): - r"""Configuration for exporting test set predictions to a - BigQuery table. - - Attributes: - destination_bigquery_uri (str): - URI of desired destination BigQuery table. Expected format: - bq://:: - - If not specified, then results are exported to the following - auto-created BigQuery table: - :export_evaluated_examples__.evaluated_examples - override_existing_table (bool): - If true and an export destination is - specified, then the contents of the destination - are overwritten. Otherwise, if the export - destination already exists, then the export - operation fails. - """ - - destination_bigquery_uri = proto.Field( - proto.STRING, - number=1, - ) - override_existing_table = proto.Field( - proto.BOOL, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/__init__.py deleted file mode 100644 index 7b646b0c20..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/__init__.py +++ /dev/null @@ -1,928 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from .services.dataset_service import DatasetServiceClient -from .services.dataset_service import DatasetServiceAsyncClient -from .services.endpoint_service import EndpointServiceClient -from .services.endpoint_service import EndpointServiceAsyncClient -from .services.featurestore_online_serving_service import FeaturestoreOnlineServingServiceClient -from .services.featurestore_online_serving_service import FeaturestoreOnlineServingServiceAsyncClient -from .services.featurestore_service import FeaturestoreServiceClient -from .services.featurestore_service import FeaturestoreServiceAsyncClient -from .services.index_endpoint_service import IndexEndpointServiceClient -from .services.index_endpoint_service import IndexEndpointServiceAsyncClient -from .services.index_service import IndexServiceClient -from .services.index_service import IndexServiceAsyncClient -from .services.job_service import JobServiceClient -from .services.job_service import JobServiceAsyncClient -from .services.metadata_service import MetadataServiceClient -from .services.metadata_service import MetadataServiceAsyncClient -from .services.migration_service import MigrationServiceClient -from .services.migration_service import MigrationServiceAsyncClient -from .services.model_service import ModelServiceClient -from .services.model_service import ModelServiceAsyncClient -from .services.pipeline_service import PipelineServiceClient -from .services.pipeline_service import PipelineServiceAsyncClient -from .services.prediction_service import PredictionServiceClient -from 
.services.prediction_service import PredictionServiceAsyncClient -from .services.specialist_pool_service import SpecialistPoolServiceClient -from .services.specialist_pool_service import SpecialistPoolServiceAsyncClient -from .services.tensorboard_service import TensorboardServiceClient -from .services.tensorboard_service import TensorboardServiceAsyncClient -from .services.vizier_service import VizierServiceClient -from .services.vizier_service import VizierServiceAsyncClient - -from .types.accelerator_type import AcceleratorType -from .types.annotation import Annotation -from .types.annotation_spec import AnnotationSpec -from .types.artifact import Artifact -from .types.batch_prediction_job import BatchPredictionJob -from .types.completion_stats import CompletionStats -from .types.context import Context -from .types.custom_job import ContainerSpec -from .types.custom_job import CustomJob -from .types.custom_job import CustomJobSpec -from .types.custom_job import PythonPackageSpec -from .types.custom_job import Scheduling -from .types.custom_job import WorkerPoolSpec -from .types.data_item import DataItem -from .types.data_labeling_job import ActiveLearningConfig -from .types.data_labeling_job import DataLabelingJob -from .types.data_labeling_job import SampleConfig -from .types.data_labeling_job import TrainingConfig -from .types.dataset import Dataset -from .types.dataset import ExportDataConfig -from .types.dataset import ImportDataConfig -from .types.dataset_service import CreateDatasetOperationMetadata -from .types.dataset_service import CreateDatasetRequest -from .types.dataset_service import DeleteDatasetRequest -from .types.dataset_service import ExportDataOperationMetadata -from .types.dataset_service import ExportDataRequest -from .types.dataset_service import ExportDataResponse -from .types.dataset_service import GetAnnotationSpecRequest -from .types.dataset_service import GetDatasetRequest -from .types.dataset_service import ImportDataOperationMetadata 
-from .types.dataset_service import ImportDataRequest -from .types.dataset_service import ImportDataResponse -from .types.dataset_service import ListAnnotationsRequest -from .types.dataset_service import ListAnnotationsResponse -from .types.dataset_service import ListDataItemsRequest -from .types.dataset_service import ListDataItemsResponse -from .types.dataset_service import ListDatasetsRequest -from .types.dataset_service import ListDatasetsResponse -from .types.dataset_service import UpdateDatasetRequest -from .types.deployed_index_ref import DeployedIndexRef -from .types.deployed_model_ref import DeployedModelRef -from .types.encryption_spec import EncryptionSpec -from .types.endpoint import DeployedModel -from .types.endpoint import Endpoint -from .types.endpoint import PrivateEndpoints -from .types.endpoint_service import CreateEndpointOperationMetadata -from .types.endpoint_service import CreateEndpointRequest -from .types.endpoint_service import DeleteEndpointRequest -from .types.endpoint_service import DeployModelOperationMetadata -from .types.endpoint_service import DeployModelRequest -from .types.endpoint_service import DeployModelResponse -from .types.endpoint_service import GetEndpointRequest -from .types.endpoint_service import ListEndpointsRequest -from .types.endpoint_service import ListEndpointsResponse -from .types.endpoint_service import UndeployModelOperationMetadata -from .types.endpoint_service import UndeployModelRequest -from .types.endpoint_service import UndeployModelResponse -from .types.endpoint_service import UpdateEndpointRequest -from .types.entity_type import EntityType -from .types.env_var import EnvVar -from .types.event import Event -from .types.execution import Execution -from .types.explanation import Attribution -from .types.explanation import BlurBaselineConfig -from .types.explanation import Explanation -from .types.explanation import ExplanationMetadataOverride -from .types.explanation import ExplanationParameters -from 
.types.explanation import ExplanationSpec -from .types.explanation import ExplanationSpecOverride -from .types.explanation import FeatureNoiseSigma -from .types.explanation import IntegratedGradientsAttribution -from .types.explanation import ModelExplanation -from .types.explanation import SampledShapleyAttribution -from .types.explanation import SmoothGradConfig -from .types.explanation import XraiAttribution -from .types.explanation_metadata import ExplanationMetadata -from .types.feature import Feature -from .types.feature_monitoring_stats import FeatureStatsAnomaly -from .types.feature_selector import FeatureSelector -from .types.feature_selector import IdMatcher -from .types.featurestore import Featurestore -from .types.featurestore_online_service import FeatureValue -from .types.featurestore_online_service import FeatureValueList -from .types.featurestore_online_service import ReadFeatureValuesRequest -from .types.featurestore_online_service import ReadFeatureValuesResponse -from .types.featurestore_online_service import StreamingReadFeatureValuesRequest -from .types.featurestore_service import BatchCreateFeaturesOperationMetadata -from .types.featurestore_service import BatchCreateFeaturesRequest -from .types.featurestore_service import BatchCreateFeaturesResponse -from .types.featurestore_service import BatchReadFeatureValuesOperationMetadata -from .types.featurestore_service import BatchReadFeatureValuesRequest -from .types.featurestore_service import BatchReadFeatureValuesResponse -from .types.featurestore_service import CreateEntityTypeOperationMetadata -from .types.featurestore_service import CreateEntityTypeRequest -from .types.featurestore_service import CreateFeatureOperationMetadata -from .types.featurestore_service import CreateFeatureRequest -from .types.featurestore_service import CreateFeaturestoreOperationMetadata -from .types.featurestore_service import CreateFeaturestoreRequest -from .types.featurestore_service import DeleteEntityTypeRequest 
-from .types.featurestore_service import DeleteFeatureRequest -from .types.featurestore_service import DeleteFeaturestoreRequest -from .types.featurestore_service import DestinationFeatureSetting -from .types.featurestore_service import ExportFeatureValuesOperationMetadata -from .types.featurestore_service import ExportFeatureValuesRequest -from .types.featurestore_service import ExportFeatureValuesResponse -from .types.featurestore_service import FeatureValueDestination -from .types.featurestore_service import GetEntityTypeRequest -from .types.featurestore_service import GetFeatureRequest -from .types.featurestore_service import GetFeaturestoreRequest -from .types.featurestore_service import ImportFeatureValuesOperationMetadata -from .types.featurestore_service import ImportFeatureValuesRequest -from .types.featurestore_service import ImportFeatureValuesResponse -from .types.featurestore_service import ListEntityTypesRequest -from .types.featurestore_service import ListEntityTypesResponse -from .types.featurestore_service import ListFeaturesRequest -from .types.featurestore_service import ListFeaturesResponse -from .types.featurestore_service import ListFeaturestoresRequest -from .types.featurestore_service import ListFeaturestoresResponse -from .types.featurestore_service import SearchFeaturesRequest -from .types.featurestore_service import SearchFeaturesResponse -from .types.featurestore_service import UpdateEntityTypeRequest -from .types.featurestore_service import UpdateFeatureRequest -from .types.featurestore_service import UpdateFeaturestoreOperationMetadata -from .types.featurestore_service import UpdateFeaturestoreRequest -from .types.hyperparameter_tuning_job import HyperparameterTuningJob -from .types.index import Index -from .types.index_endpoint import DeployedIndex -from .types.index_endpoint import DeployedIndexAuthConfig -from .types.index_endpoint import IndexEndpoint -from .types.index_endpoint import IndexPrivateEndpoints -from 
.types.index_endpoint_service import CreateIndexEndpointOperationMetadata -from .types.index_endpoint_service import CreateIndexEndpointRequest -from .types.index_endpoint_service import DeleteIndexEndpointRequest -from .types.index_endpoint_service import DeployIndexOperationMetadata -from .types.index_endpoint_service import DeployIndexRequest -from .types.index_endpoint_service import DeployIndexResponse -from .types.index_endpoint_service import GetIndexEndpointRequest -from .types.index_endpoint_service import ListIndexEndpointsRequest -from .types.index_endpoint_service import ListIndexEndpointsResponse -from .types.index_endpoint_service import MutateDeployedIndexOperationMetadata -from .types.index_endpoint_service import MutateDeployedIndexRequest -from .types.index_endpoint_service import MutateDeployedIndexResponse -from .types.index_endpoint_service import UndeployIndexOperationMetadata -from .types.index_endpoint_service import UndeployIndexRequest -from .types.index_endpoint_service import UndeployIndexResponse -from .types.index_endpoint_service import UpdateIndexEndpointRequest -from .types.index_service import CreateIndexOperationMetadata -from .types.index_service import CreateIndexRequest -from .types.index_service import DeleteIndexRequest -from .types.index_service import GetIndexRequest -from .types.index_service import ListIndexesRequest -from .types.index_service import ListIndexesResponse -from .types.index_service import NearestNeighborSearchOperationMetadata -from .types.index_service import UpdateIndexOperationMetadata -from .types.index_service import UpdateIndexRequest -from .types.io import AvroSource -from .types.io import BigQueryDestination -from .types.io import BigQuerySource -from .types.io import ContainerRegistryDestination -from .types.io import CsvDestination -from .types.io import CsvSource -from .types.io import GcsDestination -from .types.io import GcsSource -from .types.io import TFRecordDestination -from 
.types.job_service import CancelBatchPredictionJobRequest -from .types.job_service import CancelCustomJobRequest -from .types.job_service import CancelDataLabelingJobRequest -from .types.job_service import CancelHyperparameterTuningJobRequest -from .types.job_service import CreateBatchPredictionJobRequest -from .types.job_service import CreateCustomJobRequest -from .types.job_service import CreateDataLabelingJobRequest -from .types.job_service import CreateHyperparameterTuningJobRequest -from .types.job_service import CreateModelDeploymentMonitoringJobRequest -from .types.job_service import DeleteBatchPredictionJobRequest -from .types.job_service import DeleteCustomJobRequest -from .types.job_service import DeleteDataLabelingJobRequest -from .types.job_service import DeleteHyperparameterTuningJobRequest -from .types.job_service import DeleteModelDeploymentMonitoringJobRequest -from .types.job_service import GetBatchPredictionJobRequest -from .types.job_service import GetCustomJobRequest -from .types.job_service import GetDataLabelingJobRequest -from .types.job_service import GetHyperparameterTuningJobRequest -from .types.job_service import GetModelDeploymentMonitoringJobRequest -from .types.job_service import ListBatchPredictionJobsRequest -from .types.job_service import ListBatchPredictionJobsResponse -from .types.job_service import ListCustomJobsRequest -from .types.job_service import ListCustomJobsResponse -from .types.job_service import ListDataLabelingJobsRequest -from .types.job_service import ListDataLabelingJobsResponse -from .types.job_service import ListHyperparameterTuningJobsRequest -from .types.job_service import ListHyperparameterTuningJobsResponse -from .types.job_service import ListModelDeploymentMonitoringJobsRequest -from .types.job_service import ListModelDeploymentMonitoringJobsResponse -from .types.job_service import PauseModelDeploymentMonitoringJobRequest -from .types.job_service import ResumeModelDeploymentMonitoringJobRequest -from 
.types.job_service import SearchModelDeploymentMonitoringStatsAnomaliesRequest -from .types.job_service import SearchModelDeploymentMonitoringStatsAnomaliesResponse -from .types.job_service import UpdateModelDeploymentMonitoringJobOperationMetadata -from .types.job_service import UpdateModelDeploymentMonitoringJobRequest -from .types.job_state import JobState -from .types.lineage_subgraph import LineageSubgraph -from .types.machine_resources import AutomaticResources -from .types.machine_resources import AutoscalingMetricSpec -from .types.machine_resources import BatchDedicatedResources -from .types.machine_resources import DedicatedResources -from .types.machine_resources import DiskSpec -from .types.machine_resources import MachineSpec -from .types.machine_resources import ResourcesConsumed -from .types.manual_batch_tuning_parameters import ManualBatchTuningParameters -from .types.metadata_schema import MetadataSchema -from .types.metadata_service import AddContextArtifactsAndExecutionsRequest -from .types.metadata_service import AddContextArtifactsAndExecutionsResponse -from .types.metadata_service import AddContextChildrenRequest -from .types.metadata_service import AddContextChildrenResponse -from .types.metadata_service import AddExecutionEventsRequest -from .types.metadata_service import AddExecutionEventsResponse -from .types.metadata_service import CreateArtifactRequest -from .types.metadata_service import CreateContextRequest -from .types.metadata_service import CreateExecutionRequest -from .types.metadata_service import CreateMetadataSchemaRequest -from .types.metadata_service import CreateMetadataStoreOperationMetadata -from .types.metadata_service import CreateMetadataStoreRequest -from .types.metadata_service import DeleteArtifactRequest -from .types.metadata_service import DeleteContextRequest -from .types.metadata_service import DeleteExecutionRequest -from .types.metadata_service import DeleteMetadataStoreOperationMetadata -from 
.types.metadata_service import DeleteMetadataStoreRequest -from .types.metadata_service import GetArtifactRequest -from .types.metadata_service import GetContextRequest -from .types.metadata_service import GetExecutionRequest -from .types.metadata_service import GetMetadataSchemaRequest -from .types.metadata_service import GetMetadataStoreRequest -from .types.metadata_service import ListArtifactsRequest -from .types.metadata_service import ListArtifactsResponse -from .types.metadata_service import ListContextsRequest -from .types.metadata_service import ListContextsResponse -from .types.metadata_service import ListExecutionsRequest -from .types.metadata_service import ListExecutionsResponse -from .types.metadata_service import ListMetadataSchemasRequest -from .types.metadata_service import ListMetadataSchemasResponse -from .types.metadata_service import ListMetadataStoresRequest -from .types.metadata_service import ListMetadataStoresResponse -from .types.metadata_service import PurgeArtifactsMetadata -from .types.metadata_service import PurgeArtifactsRequest -from .types.metadata_service import PurgeArtifactsResponse -from .types.metadata_service import PurgeContextsMetadata -from .types.metadata_service import PurgeContextsRequest -from .types.metadata_service import PurgeContextsResponse -from .types.metadata_service import PurgeExecutionsMetadata -from .types.metadata_service import PurgeExecutionsRequest -from .types.metadata_service import PurgeExecutionsResponse -from .types.metadata_service import QueryArtifactLineageSubgraphRequest -from .types.metadata_service import QueryContextLineageSubgraphRequest -from .types.metadata_service import QueryExecutionInputsAndOutputsRequest -from .types.metadata_service import UpdateArtifactRequest -from .types.metadata_service import UpdateContextRequest -from .types.metadata_service import UpdateExecutionRequest -from .types.metadata_store import MetadataStore -from .types.migratable_resource import MigratableResource 
-from .types.migration_service import BatchMigrateResourcesOperationMetadata -from .types.migration_service import BatchMigrateResourcesRequest -from .types.migration_service import BatchMigrateResourcesResponse -from .types.migration_service import MigrateResourceRequest -from .types.migration_service import MigrateResourceResponse -from .types.migration_service import SearchMigratableResourcesRequest -from .types.migration_service import SearchMigratableResourcesResponse -from .types.model import Model -from .types.model import ModelContainerSpec -from .types.model import Port -from .types.model import PredictSchemata -from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringBigQueryTable -from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringJob -from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringObjectiveConfig -from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringScheduleConfig -from .types.model_deployment_monitoring_job import ModelMonitoringStatsAnomalies -from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringObjectiveType -from .types.model_evaluation import ModelEvaluation -from .types.model_evaluation_slice import ModelEvaluationSlice -from .types.model_monitoring import ModelMonitoringAlertConfig -from .types.model_monitoring import ModelMonitoringObjectiveConfig -from .types.model_monitoring import SamplingStrategy -from .types.model_monitoring import ThresholdConfig -from .types.model_service import DeleteModelRequest -from .types.model_service import ExportModelOperationMetadata -from .types.model_service import ExportModelRequest -from .types.model_service import ExportModelResponse -from .types.model_service import GetModelEvaluationRequest -from .types.model_service import GetModelEvaluationSliceRequest -from .types.model_service import GetModelRequest -from .types.model_service import ListModelEvaluationSlicesRequest -from 
.types.model_service import ListModelEvaluationSlicesResponse -from .types.model_service import ListModelEvaluationsRequest -from .types.model_service import ListModelEvaluationsResponse -from .types.model_service import ListModelsRequest -from .types.model_service import ListModelsResponse -from .types.model_service import UpdateModelRequest -from .types.model_service import UploadModelOperationMetadata -from .types.model_service import UploadModelRequest -from .types.model_service import UploadModelResponse -from .types.operation import DeleteOperationMetadata -from .types.operation import GenericOperationMetadata -from .types.pipeline_job import PipelineJob -from .types.pipeline_job import PipelineJobDetail -from .types.pipeline_job import PipelineTaskDetail -from .types.pipeline_job import PipelineTaskExecutorDetail -from .types.pipeline_service import CancelPipelineJobRequest -from .types.pipeline_service import CancelTrainingPipelineRequest -from .types.pipeline_service import CreatePipelineJobRequest -from .types.pipeline_service import CreateTrainingPipelineRequest -from .types.pipeline_service import DeletePipelineJobRequest -from .types.pipeline_service import DeleteTrainingPipelineRequest -from .types.pipeline_service import GetPipelineJobRequest -from .types.pipeline_service import GetTrainingPipelineRequest -from .types.pipeline_service import ListPipelineJobsRequest -from .types.pipeline_service import ListPipelineJobsResponse -from .types.pipeline_service import ListTrainingPipelinesRequest -from .types.pipeline_service import ListTrainingPipelinesResponse -from .types.pipeline_state import PipelineState -from .types.prediction_service import ExplainRequest -from .types.prediction_service import ExplainResponse -from .types.prediction_service import PredictRequest -from .types.prediction_service import PredictResponse -from .types.prediction_service import RawPredictRequest -from .types.specialist_pool import SpecialistPool -from 
.types.specialist_pool_service import CreateSpecialistPoolOperationMetadata -from .types.specialist_pool_service import CreateSpecialistPoolRequest -from .types.specialist_pool_service import DeleteSpecialistPoolRequest -from .types.specialist_pool_service import GetSpecialistPoolRequest -from .types.specialist_pool_service import ListSpecialistPoolsRequest -from .types.specialist_pool_service import ListSpecialistPoolsResponse -from .types.specialist_pool_service import UpdateSpecialistPoolOperationMetadata -from .types.specialist_pool_service import UpdateSpecialistPoolRequest -from .types.study import Measurement -from .types.study import Study -from .types.study import StudySpec -from .types.study import Trial -from .types.tensorboard import Tensorboard -from .types.tensorboard_data import Scalar -from .types.tensorboard_data import TensorboardBlob -from .types.tensorboard_data import TensorboardBlobSequence -from .types.tensorboard_data import TensorboardTensor -from .types.tensorboard_data import TimeSeriesData -from .types.tensorboard_data import TimeSeriesDataPoint -from .types.tensorboard_experiment import TensorboardExperiment -from .types.tensorboard_run import TensorboardRun -from .types.tensorboard_service import BatchCreateTensorboardRunsRequest -from .types.tensorboard_service import BatchCreateTensorboardRunsResponse -from .types.tensorboard_service import BatchCreateTensorboardTimeSeriesRequest -from .types.tensorboard_service import BatchCreateTensorboardTimeSeriesResponse -from .types.tensorboard_service import BatchReadTensorboardTimeSeriesDataRequest -from .types.tensorboard_service import BatchReadTensorboardTimeSeriesDataResponse -from .types.tensorboard_service import CreateTensorboardExperimentRequest -from .types.tensorboard_service import CreateTensorboardOperationMetadata -from .types.tensorboard_service import CreateTensorboardRequest -from .types.tensorboard_service import CreateTensorboardRunRequest -from .types.tensorboard_service 
import CreateTensorboardTimeSeriesRequest -from .types.tensorboard_service import DeleteTensorboardExperimentRequest -from .types.tensorboard_service import DeleteTensorboardRequest -from .types.tensorboard_service import DeleteTensorboardRunRequest -from .types.tensorboard_service import DeleteTensorboardTimeSeriesRequest -from .types.tensorboard_service import ExportTensorboardTimeSeriesDataRequest -from .types.tensorboard_service import ExportTensorboardTimeSeriesDataResponse -from .types.tensorboard_service import GetTensorboardExperimentRequest -from .types.tensorboard_service import GetTensorboardRequest -from .types.tensorboard_service import GetTensorboardRunRequest -from .types.tensorboard_service import GetTensorboardTimeSeriesRequest -from .types.tensorboard_service import ListTensorboardExperimentsRequest -from .types.tensorboard_service import ListTensorboardExperimentsResponse -from .types.tensorboard_service import ListTensorboardRunsRequest -from .types.tensorboard_service import ListTensorboardRunsResponse -from .types.tensorboard_service import ListTensorboardsRequest -from .types.tensorboard_service import ListTensorboardsResponse -from .types.tensorboard_service import ListTensorboardTimeSeriesRequest -from .types.tensorboard_service import ListTensorboardTimeSeriesResponse -from .types.tensorboard_service import ReadTensorboardBlobDataRequest -from .types.tensorboard_service import ReadTensorboardBlobDataResponse -from .types.tensorboard_service import ReadTensorboardTimeSeriesDataRequest -from .types.tensorboard_service import ReadTensorboardTimeSeriesDataResponse -from .types.tensorboard_service import UpdateTensorboardExperimentRequest -from .types.tensorboard_service import UpdateTensorboardOperationMetadata -from .types.tensorboard_service import UpdateTensorboardRequest -from .types.tensorboard_service import UpdateTensorboardRunRequest -from .types.tensorboard_service import UpdateTensorboardTimeSeriesRequest -from 
.types.tensorboard_service import WriteTensorboardExperimentDataRequest -from .types.tensorboard_service import WriteTensorboardExperimentDataResponse -from .types.tensorboard_service import WriteTensorboardRunDataRequest -from .types.tensorboard_service import WriteTensorboardRunDataResponse -from .types.tensorboard_time_series import TensorboardTimeSeries -from .types.training_pipeline import FilterSplit -from .types.training_pipeline import FractionSplit -from .types.training_pipeline import InputDataConfig -from .types.training_pipeline import PredefinedSplit -from .types.training_pipeline import StratifiedSplit -from .types.training_pipeline import TimestampSplit -from .types.training_pipeline import TrainingPipeline -from .types.types import BoolArray -from .types.types import DoubleArray -from .types.types import Int64Array -from .types.types import StringArray -from .types.unmanaged_container_model import UnmanagedContainerModel -from .types.user_action_reference import UserActionReference -from .types.value import Value -from .types.vizier_service import AddTrialMeasurementRequest -from .types.vizier_service import CheckTrialEarlyStoppingStateMetatdata -from .types.vizier_service import CheckTrialEarlyStoppingStateRequest -from .types.vizier_service import CheckTrialEarlyStoppingStateResponse -from .types.vizier_service import CompleteTrialRequest -from .types.vizier_service import CreateStudyRequest -from .types.vizier_service import CreateTrialRequest -from .types.vizier_service import DeleteStudyRequest -from .types.vizier_service import DeleteTrialRequest -from .types.vizier_service import GetStudyRequest -from .types.vizier_service import GetTrialRequest -from .types.vizier_service import ListOptimalTrialsRequest -from .types.vizier_service import ListOptimalTrialsResponse -from .types.vizier_service import ListStudiesRequest -from .types.vizier_service import ListStudiesResponse -from .types.vizier_service import ListTrialsRequest -from 
.types.vizier_service import ListTrialsResponse -from .types.vizier_service import LookupStudyRequest -from .types.vizier_service import StopTrialRequest -from .types.vizier_service import SuggestTrialsMetadata -from .types.vizier_service import SuggestTrialsRequest -from .types.vizier_service import SuggestTrialsResponse - -__all__ = ( - 'DatasetServiceAsyncClient', - 'EndpointServiceAsyncClient', - 'FeaturestoreOnlineServingServiceAsyncClient', - 'FeaturestoreServiceAsyncClient', - 'IndexEndpointServiceAsyncClient', - 'IndexServiceAsyncClient', - 'JobServiceAsyncClient', - 'MetadataServiceAsyncClient', - 'MigrationServiceAsyncClient', - 'ModelServiceAsyncClient', - 'PipelineServiceAsyncClient', - 'PredictionServiceAsyncClient', - 'SpecialistPoolServiceAsyncClient', - 'TensorboardServiceAsyncClient', - 'VizierServiceAsyncClient', -'AcceleratorType', -'ActiveLearningConfig', -'AddContextArtifactsAndExecutionsRequest', -'AddContextArtifactsAndExecutionsResponse', -'AddContextChildrenRequest', -'AddContextChildrenResponse', -'AddExecutionEventsRequest', -'AddExecutionEventsResponse', -'AddTrialMeasurementRequest', -'Annotation', -'AnnotationSpec', -'Artifact', -'Attribution', -'AutomaticResources', -'AutoscalingMetricSpec', -'AvroSource', -'BatchCreateFeaturesOperationMetadata', -'BatchCreateFeaturesRequest', -'BatchCreateFeaturesResponse', -'BatchCreateTensorboardRunsRequest', -'BatchCreateTensorboardRunsResponse', -'BatchCreateTensorboardTimeSeriesRequest', -'BatchCreateTensorboardTimeSeriesResponse', -'BatchDedicatedResources', -'BatchMigrateResourcesOperationMetadata', -'BatchMigrateResourcesRequest', -'BatchMigrateResourcesResponse', -'BatchPredictionJob', -'BatchReadFeatureValuesOperationMetadata', -'BatchReadFeatureValuesRequest', -'BatchReadFeatureValuesResponse', -'BatchReadTensorboardTimeSeriesDataRequest', -'BatchReadTensorboardTimeSeriesDataResponse', -'BigQueryDestination', -'BigQuerySource', -'BlurBaselineConfig', -'BoolArray', 
-'CancelBatchPredictionJobRequest', -'CancelCustomJobRequest', -'CancelDataLabelingJobRequest', -'CancelHyperparameterTuningJobRequest', -'CancelPipelineJobRequest', -'CancelTrainingPipelineRequest', -'CheckTrialEarlyStoppingStateMetatdata', -'CheckTrialEarlyStoppingStateRequest', -'CheckTrialEarlyStoppingStateResponse', -'CompleteTrialRequest', -'CompletionStats', -'ContainerRegistryDestination', -'ContainerSpec', -'Context', -'CreateArtifactRequest', -'CreateBatchPredictionJobRequest', -'CreateContextRequest', -'CreateCustomJobRequest', -'CreateDataLabelingJobRequest', -'CreateDatasetOperationMetadata', -'CreateDatasetRequest', -'CreateEndpointOperationMetadata', -'CreateEndpointRequest', -'CreateEntityTypeOperationMetadata', -'CreateEntityTypeRequest', -'CreateExecutionRequest', -'CreateFeatureOperationMetadata', -'CreateFeatureRequest', -'CreateFeaturestoreOperationMetadata', -'CreateFeaturestoreRequest', -'CreateHyperparameterTuningJobRequest', -'CreateIndexEndpointOperationMetadata', -'CreateIndexEndpointRequest', -'CreateIndexOperationMetadata', -'CreateIndexRequest', -'CreateMetadataSchemaRequest', -'CreateMetadataStoreOperationMetadata', -'CreateMetadataStoreRequest', -'CreateModelDeploymentMonitoringJobRequest', -'CreatePipelineJobRequest', -'CreateSpecialistPoolOperationMetadata', -'CreateSpecialistPoolRequest', -'CreateStudyRequest', -'CreateTensorboardExperimentRequest', -'CreateTensorboardOperationMetadata', -'CreateTensorboardRequest', -'CreateTensorboardRunRequest', -'CreateTensorboardTimeSeriesRequest', -'CreateTrainingPipelineRequest', -'CreateTrialRequest', -'CsvDestination', -'CsvSource', -'CustomJob', -'CustomJobSpec', -'DataItem', -'DataLabelingJob', -'Dataset', -'DatasetServiceClient', -'DedicatedResources', -'DeleteArtifactRequest', -'DeleteBatchPredictionJobRequest', -'DeleteContextRequest', -'DeleteCustomJobRequest', -'DeleteDataLabelingJobRequest', -'DeleteDatasetRequest', -'DeleteEndpointRequest', -'DeleteEntityTypeRequest', 
-'DeleteExecutionRequest', -'DeleteFeatureRequest', -'DeleteFeaturestoreRequest', -'DeleteHyperparameterTuningJobRequest', -'DeleteIndexEndpointRequest', -'DeleteIndexRequest', -'DeleteMetadataStoreOperationMetadata', -'DeleteMetadataStoreRequest', -'DeleteModelDeploymentMonitoringJobRequest', -'DeleteModelRequest', -'DeleteOperationMetadata', -'DeletePipelineJobRequest', -'DeleteSpecialistPoolRequest', -'DeleteStudyRequest', -'DeleteTensorboardExperimentRequest', -'DeleteTensorboardRequest', -'DeleteTensorboardRunRequest', -'DeleteTensorboardTimeSeriesRequest', -'DeleteTrainingPipelineRequest', -'DeleteTrialRequest', -'DeployIndexOperationMetadata', -'DeployIndexRequest', -'DeployIndexResponse', -'DeployModelOperationMetadata', -'DeployModelRequest', -'DeployModelResponse', -'DeployedIndex', -'DeployedIndexAuthConfig', -'DeployedIndexRef', -'DeployedModel', -'DeployedModelRef', -'DestinationFeatureSetting', -'DiskSpec', -'DoubleArray', -'EncryptionSpec', -'Endpoint', -'EndpointServiceClient', -'EntityType', -'EnvVar', -'Event', -'Execution', -'ExplainRequest', -'ExplainResponse', -'Explanation', -'ExplanationMetadata', -'ExplanationMetadataOverride', -'ExplanationParameters', -'ExplanationSpec', -'ExplanationSpecOverride', -'ExportDataConfig', -'ExportDataOperationMetadata', -'ExportDataRequest', -'ExportDataResponse', -'ExportFeatureValuesOperationMetadata', -'ExportFeatureValuesRequest', -'ExportFeatureValuesResponse', -'ExportModelOperationMetadata', -'ExportModelRequest', -'ExportModelResponse', -'ExportTensorboardTimeSeriesDataRequest', -'ExportTensorboardTimeSeriesDataResponse', -'Feature', -'FeatureNoiseSigma', -'FeatureSelector', -'FeatureStatsAnomaly', -'FeatureValue', -'FeatureValueDestination', -'FeatureValueList', -'Featurestore', -'FeaturestoreOnlineServingServiceClient', -'FeaturestoreServiceClient', -'FilterSplit', -'FractionSplit', -'GcsDestination', -'GcsSource', -'GenericOperationMetadata', -'GetAnnotationSpecRequest', -'GetArtifactRequest', 
-'GetBatchPredictionJobRequest', -'GetContextRequest', -'GetCustomJobRequest', -'GetDataLabelingJobRequest', -'GetDatasetRequest', -'GetEndpointRequest', -'GetEntityTypeRequest', -'GetExecutionRequest', -'GetFeatureRequest', -'GetFeaturestoreRequest', -'GetHyperparameterTuningJobRequest', -'GetIndexEndpointRequest', -'GetIndexRequest', -'GetMetadataSchemaRequest', -'GetMetadataStoreRequest', -'GetModelDeploymentMonitoringJobRequest', -'GetModelEvaluationRequest', -'GetModelEvaluationSliceRequest', -'GetModelRequest', -'GetPipelineJobRequest', -'GetSpecialistPoolRequest', -'GetStudyRequest', -'GetTensorboardExperimentRequest', -'GetTensorboardRequest', -'GetTensorboardRunRequest', -'GetTensorboardTimeSeriesRequest', -'GetTrainingPipelineRequest', -'GetTrialRequest', -'HyperparameterTuningJob', -'IdMatcher', -'ImportDataConfig', -'ImportDataOperationMetadata', -'ImportDataRequest', -'ImportDataResponse', -'ImportFeatureValuesOperationMetadata', -'ImportFeatureValuesRequest', -'ImportFeatureValuesResponse', -'Index', -'IndexEndpoint', -'IndexEndpointServiceClient', -'IndexPrivateEndpoints', -'IndexServiceClient', -'InputDataConfig', -'Int64Array', -'IntegratedGradientsAttribution', -'JobServiceClient', -'JobState', -'LineageSubgraph', -'ListAnnotationsRequest', -'ListAnnotationsResponse', -'ListArtifactsRequest', -'ListArtifactsResponse', -'ListBatchPredictionJobsRequest', -'ListBatchPredictionJobsResponse', -'ListContextsRequest', -'ListContextsResponse', -'ListCustomJobsRequest', -'ListCustomJobsResponse', -'ListDataItemsRequest', -'ListDataItemsResponse', -'ListDataLabelingJobsRequest', -'ListDataLabelingJobsResponse', -'ListDatasetsRequest', -'ListDatasetsResponse', -'ListEndpointsRequest', -'ListEndpointsResponse', -'ListEntityTypesRequest', -'ListEntityTypesResponse', -'ListExecutionsRequest', -'ListExecutionsResponse', -'ListFeaturesRequest', -'ListFeaturesResponse', -'ListFeaturestoresRequest', -'ListFeaturestoresResponse', 
-'ListHyperparameterTuningJobsRequest', -'ListHyperparameterTuningJobsResponse', -'ListIndexEndpointsRequest', -'ListIndexEndpointsResponse', -'ListIndexesRequest', -'ListIndexesResponse', -'ListMetadataSchemasRequest', -'ListMetadataSchemasResponse', -'ListMetadataStoresRequest', -'ListMetadataStoresResponse', -'ListModelDeploymentMonitoringJobsRequest', -'ListModelDeploymentMonitoringJobsResponse', -'ListModelEvaluationSlicesRequest', -'ListModelEvaluationSlicesResponse', -'ListModelEvaluationsRequest', -'ListModelEvaluationsResponse', -'ListModelsRequest', -'ListModelsResponse', -'ListOptimalTrialsRequest', -'ListOptimalTrialsResponse', -'ListPipelineJobsRequest', -'ListPipelineJobsResponse', -'ListSpecialistPoolsRequest', -'ListSpecialistPoolsResponse', -'ListStudiesRequest', -'ListStudiesResponse', -'ListTensorboardExperimentsRequest', -'ListTensorboardExperimentsResponse', -'ListTensorboardRunsRequest', -'ListTensorboardRunsResponse', -'ListTensorboardTimeSeriesRequest', -'ListTensorboardTimeSeriesResponse', -'ListTensorboardsRequest', -'ListTensorboardsResponse', -'ListTrainingPipelinesRequest', -'ListTrainingPipelinesResponse', -'ListTrialsRequest', -'ListTrialsResponse', -'LookupStudyRequest', -'MachineSpec', -'ManualBatchTuningParameters', -'Measurement', -'MetadataSchema', -'MetadataServiceClient', -'MetadataStore', -'MigratableResource', -'MigrateResourceRequest', -'MigrateResourceResponse', -'MigrationServiceClient', -'Model', -'ModelContainerSpec', -'ModelDeploymentMonitoringBigQueryTable', -'ModelDeploymentMonitoringJob', -'ModelDeploymentMonitoringObjectiveConfig', -'ModelDeploymentMonitoringObjectiveType', -'ModelDeploymentMonitoringScheduleConfig', -'ModelEvaluation', -'ModelEvaluationSlice', -'ModelExplanation', -'ModelMonitoringAlertConfig', -'ModelMonitoringObjectiveConfig', -'ModelMonitoringStatsAnomalies', -'ModelServiceClient', -'MutateDeployedIndexOperationMetadata', -'MutateDeployedIndexRequest', -'MutateDeployedIndexResponse', 
-'NearestNeighborSearchOperationMetadata', -'PauseModelDeploymentMonitoringJobRequest', -'PipelineJob', -'PipelineJobDetail', -'PipelineServiceClient', -'PipelineState', -'PipelineTaskDetail', -'PipelineTaskExecutorDetail', -'Port', -'PredefinedSplit', -'PredictRequest', -'PredictResponse', -'PredictSchemata', -'PredictionServiceClient', -'PrivateEndpoints', -'PurgeArtifactsMetadata', -'PurgeArtifactsRequest', -'PurgeArtifactsResponse', -'PurgeContextsMetadata', -'PurgeContextsRequest', -'PurgeContextsResponse', -'PurgeExecutionsMetadata', -'PurgeExecutionsRequest', -'PurgeExecutionsResponse', -'PythonPackageSpec', -'QueryArtifactLineageSubgraphRequest', -'QueryContextLineageSubgraphRequest', -'QueryExecutionInputsAndOutputsRequest', -'RawPredictRequest', -'ReadFeatureValuesRequest', -'ReadFeatureValuesResponse', -'ReadTensorboardBlobDataRequest', -'ReadTensorboardBlobDataResponse', -'ReadTensorboardTimeSeriesDataRequest', -'ReadTensorboardTimeSeriesDataResponse', -'ResourcesConsumed', -'ResumeModelDeploymentMonitoringJobRequest', -'SampleConfig', -'SampledShapleyAttribution', -'SamplingStrategy', -'Scalar', -'Scheduling', -'SearchFeaturesRequest', -'SearchFeaturesResponse', -'SearchMigratableResourcesRequest', -'SearchMigratableResourcesResponse', -'SearchModelDeploymentMonitoringStatsAnomaliesRequest', -'SearchModelDeploymentMonitoringStatsAnomaliesResponse', -'SmoothGradConfig', -'SpecialistPool', -'SpecialistPoolServiceClient', -'StopTrialRequest', -'StratifiedSplit', -'StreamingReadFeatureValuesRequest', -'StringArray', -'Study', -'StudySpec', -'SuggestTrialsMetadata', -'SuggestTrialsRequest', -'SuggestTrialsResponse', -'TFRecordDestination', -'Tensorboard', -'TensorboardBlob', -'TensorboardBlobSequence', -'TensorboardExperiment', -'TensorboardRun', -'TensorboardServiceClient', -'TensorboardTensor', -'TensorboardTimeSeries', -'ThresholdConfig', -'TimeSeriesData', -'TimeSeriesDataPoint', -'TimestampSplit', -'TrainingConfig', -'TrainingPipeline', -'Trial', 
-'UndeployIndexOperationMetadata', -'UndeployIndexRequest', -'UndeployIndexResponse', -'UndeployModelOperationMetadata', -'UndeployModelRequest', -'UndeployModelResponse', -'UnmanagedContainerModel', -'UpdateArtifactRequest', -'UpdateContextRequest', -'UpdateDatasetRequest', -'UpdateEndpointRequest', -'UpdateEntityTypeRequest', -'UpdateExecutionRequest', -'UpdateFeatureRequest', -'UpdateFeaturestoreOperationMetadata', -'UpdateFeaturestoreRequest', -'UpdateIndexEndpointRequest', -'UpdateIndexOperationMetadata', -'UpdateIndexRequest', -'UpdateModelDeploymentMonitoringJobOperationMetadata', -'UpdateModelDeploymentMonitoringJobRequest', -'UpdateModelRequest', -'UpdateSpecialistPoolOperationMetadata', -'UpdateSpecialistPoolRequest', -'UpdateTensorboardExperimentRequest', -'UpdateTensorboardOperationMetadata', -'UpdateTensorboardRequest', -'UpdateTensorboardRunRequest', -'UpdateTensorboardTimeSeriesRequest', -'UploadModelOperationMetadata', -'UploadModelRequest', -'UploadModelResponse', -'UserActionReference', -'Value', -'VizierServiceClient', -'WorkerPoolSpec', -'WriteTensorboardExperimentDataRequest', -'WriteTensorboardExperimentDataResponse', -'WriteTensorboardRunDataRequest', -'WriteTensorboardRunDataResponse', -'XraiAttribution', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/gapic_metadata.json b/owl-bot-staging/v1/google/cloud/aiplatform_v1/gapic_metadata.json deleted file mode 100644 index b7e8b7361a..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/gapic_metadata.json +++ /dev/null @@ -1,2059 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.aiplatform_v1", - "protoPackage": "google.cloud.aiplatform.v1", - "schema": "1.0", - "services": { - "DatasetService": { - "clients": { - "grpc": { - "libraryClient": "DatasetServiceClient", - "rpcs": { - "CreateDataset": { - "methods": [ - "create_dataset" - ] - }, - 
"DeleteDataset": { - "methods": [ - "delete_dataset" - ] - }, - "ExportData": { - "methods": [ - "export_data" - ] - }, - "GetAnnotationSpec": { - "methods": [ - "get_annotation_spec" - ] - }, - "GetDataset": { - "methods": [ - "get_dataset" - ] - }, - "ImportData": { - "methods": [ - "import_data" - ] - }, - "ListAnnotations": { - "methods": [ - "list_annotations" - ] - }, - "ListDataItems": { - "methods": [ - "list_data_items" - ] - }, - "ListDatasets": { - "methods": [ - "list_datasets" - ] - }, - "UpdateDataset": { - "methods": [ - "update_dataset" - ] - } - } - }, - "grpc-async": { - "libraryClient": "DatasetServiceAsyncClient", - "rpcs": { - "CreateDataset": { - "methods": [ - "create_dataset" - ] - }, - "DeleteDataset": { - "methods": [ - "delete_dataset" - ] - }, - "ExportData": { - "methods": [ - "export_data" - ] - }, - "GetAnnotationSpec": { - "methods": [ - "get_annotation_spec" - ] - }, - "GetDataset": { - "methods": [ - "get_dataset" - ] - }, - "ImportData": { - "methods": [ - "import_data" - ] - }, - "ListAnnotations": { - "methods": [ - "list_annotations" - ] - }, - "ListDataItems": { - "methods": [ - "list_data_items" - ] - }, - "ListDatasets": { - "methods": [ - "list_datasets" - ] - }, - "UpdateDataset": { - "methods": [ - "update_dataset" - ] - } - } - } - } - }, - "EndpointService": { - "clients": { - "grpc": { - "libraryClient": "EndpointServiceClient", - "rpcs": { - "CreateEndpoint": { - "methods": [ - "create_endpoint" - ] - }, - "DeleteEndpoint": { - "methods": [ - "delete_endpoint" - ] - }, - "DeployModel": { - "methods": [ - "deploy_model" - ] - }, - "GetEndpoint": { - "methods": [ - "get_endpoint" - ] - }, - "ListEndpoints": { - "methods": [ - "list_endpoints" - ] - }, - "UndeployModel": { - "methods": [ - "undeploy_model" - ] - }, - "UpdateEndpoint": { - "methods": [ - "update_endpoint" - ] - } - } - }, - "grpc-async": { - "libraryClient": "EndpointServiceAsyncClient", - "rpcs": { - "CreateEndpoint": { - "methods": [ - "create_endpoint" 
- ] - }, - "DeleteEndpoint": { - "methods": [ - "delete_endpoint" - ] - }, - "DeployModel": { - "methods": [ - "deploy_model" - ] - }, - "GetEndpoint": { - "methods": [ - "get_endpoint" - ] - }, - "ListEndpoints": { - "methods": [ - "list_endpoints" - ] - }, - "UndeployModel": { - "methods": [ - "undeploy_model" - ] - }, - "UpdateEndpoint": { - "methods": [ - "update_endpoint" - ] - } - } - } - } - }, - "FeaturestoreOnlineServingService": { - "clients": { - "grpc": { - "libraryClient": "FeaturestoreOnlineServingServiceClient", - "rpcs": { - "ReadFeatureValues": { - "methods": [ - "read_feature_values" - ] - }, - "StreamingReadFeatureValues": { - "methods": [ - "streaming_read_feature_values" - ] - } - } - }, - "grpc-async": { - "libraryClient": "FeaturestoreOnlineServingServiceAsyncClient", - "rpcs": { - "ReadFeatureValues": { - "methods": [ - "read_feature_values" - ] - }, - "StreamingReadFeatureValues": { - "methods": [ - "streaming_read_feature_values" - ] - } - } - } - } - }, - "FeaturestoreService": { - "clients": { - "grpc": { - "libraryClient": "FeaturestoreServiceClient", - "rpcs": { - "BatchCreateFeatures": { - "methods": [ - "batch_create_features" - ] - }, - "BatchReadFeatureValues": { - "methods": [ - "batch_read_feature_values" - ] - }, - "CreateEntityType": { - "methods": [ - "create_entity_type" - ] - }, - "CreateFeature": { - "methods": [ - "create_feature" - ] - }, - "CreateFeaturestore": { - "methods": [ - "create_featurestore" - ] - }, - "DeleteEntityType": { - "methods": [ - "delete_entity_type" - ] - }, - "DeleteFeature": { - "methods": [ - "delete_feature" - ] - }, - "DeleteFeaturestore": { - "methods": [ - "delete_featurestore" - ] - }, - "ExportFeatureValues": { - "methods": [ - "export_feature_values" - ] - }, - "GetEntityType": { - "methods": [ - "get_entity_type" - ] - }, - "GetFeature": { - "methods": [ - "get_feature" - ] - }, - "GetFeaturestore": { - "methods": [ - "get_featurestore" - ] - }, - "ImportFeatureValues": { - "methods": [ - 
"import_feature_values" - ] - }, - "ListEntityTypes": { - "methods": [ - "list_entity_types" - ] - }, - "ListFeatures": { - "methods": [ - "list_features" - ] - }, - "ListFeaturestores": { - "methods": [ - "list_featurestores" - ] - }, - "SearchFeatures": { - "methods": [ - "search_features" - ] - }, - "UpdateEntityType": { - "methods": [ - "update_entity_type" - ] - }, - "UpdateFeature": { - "methods": [ - "update_feature" - ] - }, - "UpdateFeaturestore": { - "methods": [ - "update_featurestore" - ] - } - } - }, - "grpc-async": { - "libraryClient": "FeaturestoreServiceAsyncClient", - "rpcs": { - "BatchCreateFeatures": { - "methods": [ - "batch_create_features" - ] - }, - "BatchReadFeatureValues": { - "methods": [ - "batch_read_feature_values" - ] - }, - "CreateEntityType": { - "methods": [ - "create_entity_type" - ] - }, - "CreateFeature": { - "methods": [ - "create_feature" - ] - }, - "CreateFeaturestore": { - "methods": [ - "create_featurestore" - ] - }, - "DeleteEntityType": { - "methods": [ - "delete_entity_type" - ] - }, - "DeleteFeature": { - "methods": [ - "delete_feature" - ] - }, - "DeleteFeaturestore": { - "methods": [ - "delete_featurestore" - ] - }, - "ExportFeatureValues": { - "methods": [ - "export_feature_values" - ] - }, - "GetEntityType": { - "methods": [ - "get_entity_type" - ] - }, - "GetFeature": { - "methods": [ - "get_feature" - ] - }, - "GetFeaturestore": { - "methods": [ - "get_featurestore" - ] - }, - "ImportFeatureValues": { - "methods": [ - "import_feature_values" - ] - }, - "ListEntityTypes": { - "methods": [ - "list_entity_types" - ] - }, - "ListFeatures": { - "methods": [ - "list_features" - ] - }, - "ListFeaturestores": { - "methods": [ - "list_featurestores" - ] - }, - "SearchFeatures": { - "methods": [ - "search_features" - ] - }, - "UpdateEntityType": { - "methods": [ - "update_entity_type" - ] - }, - "UpdateFeature": { - "methods": [ - "update_feature" - ] - }, - "UpdateFeaturestore": { - "methods": [ - "update_featurestore" - ] 
- } - } - } - } - }, - "IndexEndpointService": { - "clients": { - "grpc": { - "libraryClient": "IndexEndpointServiceClient", - "rpcs": { - "CreateIndexEndpoint": { - "methods": [ - "create_index_endpoint" - ] - }, - "DeleteIndexEndpoint": { - "methods": [ - "delete_index_endpoint" - ] - }, - "DeployIndex": { - "methods": [ - "deploy_index" - ] - }, - "GetIndexEndpoint": { - "methods": [ - "get_index_endpoint" - ] - }, - "ListIndexEndpoints": { - "methods": [ - "list_index_endpoints" - ] - }, - "MutateDeployedIndex": { - "methods": [ - "mutate_deployed_index" - ] - }, - "UndeployIndex": { - "methods": [ - "undeploy_index" - ] - }, - "UpdateIndexEndpoint": { - "methods": [ - "update_index_endpoint" - ] - } - } - }, - "grpc-async": { - "libraryClient": "IndexEndpointServiceAsyncClient", - "rpcs": { - "CreateIndexEndpoint": { - "methods": [ - "create_index_endpoint" - ] - }, - "DeleteIndexEndpoint": { - "methods": [ - "delete_index_endpoint" - ] - }, - "DeployIndex": { - "methods": [ - "deploy_index" - ] - }, - "GetIndexEndpoint": { - "methods": [ - "get_index_endpoint" - ] - }, - "ListIndexEndpoints": { - "methods": [ - "list_index_endpoints" - ] - }, - "MutateDeployedIndex": { - "methods": [ - "mutate_deployed_index" - ] - }, - "UndeployIndex": { - "methods": [ - "undeploy_index" - ] - }, - "UpdateIndexEndpoint": { - "methods": [ - "update_index_endpoint" - ] - } - } - } - } - }, - "IndexService": { - "clients": { - "grpc": { - "libraryClient": "IndexServiceClient", - "rpcs": { - "CreateIndex": { - "methods": [ - "create_index" - ] - }, - "DeleteIndex": { - "methods": [ - "delete_index" - ] - }, - "GetIndex": { - "methods": [ - "get_index" - ] - }, - "ListIndexes": { - "methods": [ - "list_indexes" - ] - }, - "UpdateIndex": { - "methods": [ - "update_index" - ] - } - } - }, - "grpc-async": { - "libraryClient": "IndexServiceAsyncClient", - "rpcs": { - "CreateIndex": { - "methods": [ - "create_index" - ] - }, - "DeleteIndex": { - "methods": [ - "delete_index" - ] - }, 
- "GetIndex": { - "methods": [ - "get_index" - ] - }, - "ListIndexes": { - "methods": [ - "list_indexes" - ] - }, - "UpdateIndex": { - "methods": [ - "update_index" - ] - } - } - } - } - }, - "JobService": { - "clients": { - "grpc": { - "libraryClient": "JobServiceClient", - "rpcs": { - "CancelBatchPredictionJob": { - "methods": [ - "cancel_batch_prediction_job" - ] - }, - "CancelCustomJob": { - "methods": [ - "cancel_custom_job" - ] - }, - "CancelDataLabelingJob": { - "methods": [ - "cancel_data_labeling_job" - ] - }, - "CancelHyperparameterTuningJob": { - "methods": [ - "cancel_hyperparameter_tuning_job" - ] - }, - "CreateBatchPredictionJob": { - "methods": [ - "create_batch_prediction_job" - ] - }, - "CreateCustomJob": { - "methods": [ - "create_custom_job" - ] - }, - "CreateDataLabelingJob": { - "methods": [ - "create_data_labeling_job" - ] - }, - "CreateHyperparameterTuningJob": { - "methods": [ - "create_hyperparameter_tuning_job" - ] - }, - "CreateModelDeploymentMonitoringJob": { - "methods": [ - "create_model_deployment_monitoring_job" - ] - }, - "DeleteBatchPredictionJob": { - "methods": [ - "delete_batch_prediction_job" - ] - }, - "DeleteCustomJob": { - "methods": [ - "delete_custom_job" - ] - }, - "DeleteDataLabelingJob": { - "methods": [ - "delete_data_labeling_job" - ] - }, - "DeleteHyperparameterTuningJob": { - "methods": [ - "delete_hyperparameter_tuning_job" - ] - }, - "DeleteModelDeploymentMonitoringJob": { - "methods": [ - "delete_model_deployment_monitoring_job" - ] - }, - "GetBatchPredictionJob": { - "methods": [ - "get_batch_prediction_job" - ] - }, - "GetCustomJob": { - "methods": [ - "get_custom_job" - ] - }, - "GetDataLabelingJob": { - "methods": [ - "get_data_labeling_job" - ] - }, - "GetHyperparameterTuningJob": { - "methods": [ - "get_hyperparameter_tuning_job" - ] - }, - "GetModelDeploymentMonitoringJob": { - "methods": [ - "get_model_deployment_monitoring_job" - ] - }, - "ListBatchPredictionJobs": { - "methods": [ - 
"list_batch_prediction_jobs" - ] - }, - "ListCustomJobs": { - "methods": [ - "list_custom_jobs" - ] - }, - "ListDataLabelingJobs": { - "methods": [ - "list_data_labeling_jobs" - ] - }, - "ListHyperparameterTuningJobs": { - "methods": [ - "list_hyperparameter_tuning_jobs" - ] - }, - "ListModelDeploymentMonitoringJobs": { - "methods": [ - "list_model_deployment_monitoring_jobs" - ] - }, - "PauseModelDeploymentMonitoringJob": { - "methods": [ - "pause_model_deployment_monitoring_job" - ] - }, - "ResumeModelDeploymentMonitoringJob": { - "methods": [ - "resume_model_deployment_monitoring_job" - ] - }, - "SearchModelDeploymentMonitoringStatsAnomalies": { - "methods": [ - "search_model_deployment_monitoring_stats_anomalies" - ] - }, - "UpdateModelDeploymentMonitoringJob": { - "methods": [ - "update_model_deployment_monitoring_job" - ] - } - } - }, - "grpc-async": { - "libraryClient": "JobServiceAsyncClient", - "rpcs": { - "CancelBatchPredictionJob": { - "methods": [ - "cancel_batch_prediction_job" - ] - }, - "CancelCustomJob": { - "methods": [ - "cancel_custom_job" - ] - }, - "CancelDataLabelingJob": { - "methods": [ - "cancel_data_labeling_job" - ] - }, - "CancelHyperparameterTuningJob": { - "methods": [ - "cancel_hyperparameter_tuning_job" - ] - }, - "CreateBatchPredictionJob": { - "methods": [ - "create_batch_prediction_job" - ] - }, - "CreateCustomJob": { - "methods": [ - "create_custom_job" - ] - }, - "CreateDataLabelingJob": { - "methods": [ - "create_data_labeling_job" - ] - }, - "CreateHyperparameterTuningJob": { - "methods": [ - "create_hyperparameter_tuning_job" - ] - }, - "CreateModelDeploymentMonitoringJob": { - "methods": [ - "create_model_deployment_monitoring_job" - ] - }, - "DeleteBatchPredictionJob": { - "methods": [ - "delete_batch_prediction_job" - ] - }, - "DeleteCustomJob": { - "methods": [ - "delete_custom_job" - ] - }, - "DeleteDataLabelingJob": { - "methods": [ - "delete_data_labeling_job" - ] - }, - "DeleteHyperparameterTuningJob": { - "methods": 
[ - "delete_hyperparameter_tuning_job" - ] - }, - "DeleteModelDeploymentMonitoringJob": { - "methods": [ - "delete_model_deployment_monitoring_job" - ] - }, - "GetBatchPredictionJob": { - "methods": [ - "get_batch_prediction_job" - ] - }, - "GetCustomJob": { - "methods": [ - "get_custom_job" - ] - }, - "GetDataLabelingJob": { - "methods": [ - "get_data_labeling_job" - ] - }, - "GetHyperparameterTuningJob": { - "methods": [ - "get_hyperparameter_tuning_job" - ] - }, - "GetModelDeploymentMonitoringJob": { - "methods": [ - "get_model_deployment_monitoring_job" - ] - }, - "ListBatchPredictionJobs": { - "methods": [ - "list_batch_prediction_jobs" - ] - }, - "ListCustomJobs": { - "methods": [ - "list_custom_jobs" - ] - }, - "ListDataLabelingJobs": { - "methods": [ - "list_data_labeling_jobs" - ] - }, - "ListHyperparameterTuningJobs": { - "methods": [ - "list_hyperparameter_tuning_jobs" - ] - }, - "ListModelDeploymentMonitoringJobs": { - "methods": [ - "list_model_deployment_monitoring_jobs" - ] - }, - "PauseModelDeploymentMonitoringJob": { - "methods": [ - "pause_model_deployment_monitoring_job" - ] - }, - "ResumeModelDeploymentMonitoringJob": { - "methods": [ - "resume_model_deployment_monitoring_job" - ] - }, - "SearchModelDeploymentMonitoringStatsAnomalies": { - "methods": [ - "search_model_deployment_monitoring_stats_anomalies" - ] - }, - "UpdateModelDeploymentMonitoringJob": { - "methods": [ - "update_model_deployment_monitoring_job" - ] - } - } - } - } - }, - "MetadataService": { - "clients": { - "grpc": { - "libraryClient": "MetadataServiceClient", - "rpcs": { - "AddContextArtifactsAndExecutions": { - "methods": [ - "add_context_artifacts_and_executions" - ] - }, - "AddContextChildren": { - "methods": [ - "add_context_children" - ] - }, - "AddExecutionEvents": { - "methods": [ - "add_execution_events" - ] - }, - "CreateArtifact": { - "methods": [ - "create_artifact" - ] - }, - "CreateContext": { - "methods": [ - "create_context" - ] - }, - "CreateExecution": { - 
"methods": [ - "create_execution" - ] - }, - "CreateMetadataSchema": { - "methods": [ - "create_metadata_schema" - ] - }, - "CreateMetadataStore": { - "methods": [ - "create_metadata_store" - ] - }, - "DeleteArtifact": { - "methods": [ - "delete_artifact" - ] - }, - "DeleteContext": { - "methods": [ - "delete_context" - ] - }, - "DeleteExecution": { - "methods": [ - "delete_execution" - ] - }, - "DeleteMetadataStore": { - "methods": [ - "delete_metadata_store" - ] - }, - "GetArtifact": { - "methods": [ - "get_artifact" - ] - }, - "GetContext": { - "methods": [ - "get_context" - ] - }, - "GetExecution": { - "methods": [ - "get_execution" - ] - }, - "GetMetadataSchema": { - "methods": [ - "get_metadata_schema" - ] - }, - "GetMetadataStore": { - "methods": [ - "get_metadata_store" - ] - }, - "ListArtifacts": { - "methods": [ - "list_artifacts" - ] - }, - "ListContexts": { - "methods": [ - "list_contexts" - ] - }, - "ListExecutions": { - "methods": [ - "list_executions" - ] - }, - "ListMetadataSchemas": { - "methods": [ - "list_metadata_schemas" - ] - }, - "ListMetadataStores": { - "methods": [ - "list_metadata_stores" - ] - }, - "PurgeArtifacts": { - "methods": [ - "purge_artifacts" - ] - }, - "PurgeContexts": { - "methods": [ - "purge_contexts" - ] - }, - "PurgeExecutions": { - "methods": [ - "purge_executions" - ] - }, - "QueryArtifactLineageSubgraph": { - "methods": [ - "query_artifact_lineage_subgraph" - ] - }, - "QueryContextLineageSubgraph": { - "methods": [ - "query_context_lineage_subgraph" - ] - }, - "QueryExecutionInputsAndOutputs": { - "methods": [ - "query_execution_inputs_and_outputs" - ] - }, - "UpdateArtifact": { - "methods": [ - "update_artifact" - ] - }, - "UpdateContext": { - "methods": [ - "update_context" - ] - }, - "UpdateExecution": { - "methods": [ - "update_execution" - ] - } - } - }, - "grpc-async": { - "libraryClient": "MetadataServiceAsyncClient", - "rpcs": { - "AddContextArtifactsAndExecutions": { - "methods": [ - 
"add_context_artifacts_and_executions" - ] - }, - "AddContextChildren": { - "methods": [ - "add_context_children" - ] - }, - "AddExecutionEvents": { - "methods": [ - "add_execution_events" - ] - }, - "CreateArtifact": { - "methods": [ - "create_artifact" - ] - }, - "CreateContext": { - "methods": [ - "create_context" - ] - }, - "CreateExecution": { - "methods": [ - "create_execution" - ] - }, - "CreateMetadataSchema": { - "methods": [ - "create_metadata_schema" - ] - }, - "CreateMetadataStore": { - "methods": [ - "create_metadata_store" - ] - }, - "DeleteArtifact": { - "methods": [ - "delete_artifact" - ] - }, - "DeleteContext": { - "methods": [ - "delete_context" - ] - }, - "DeleteExecution": { - "methods": [ - "delete_execution" - ] - }, - "DeleteMetadataStore": { - "methods": [ - "delete_metadata_store" - ] - }, - "GetArtifact": { - "methods": [ - "get_artifact" - ] - }, - "GetContext": { - "methods": [ - "get_context" - ] - }, - "GetExecution": { - "methods": [ - "get_execution" - ] - }, - "GetMetadataSchema": { - "methods": [ - "get_metadata_schema" - ] - }, - "GetMetadataStore": { - "methods": [ - "get_metadata_store" - ] - }, - "ListArtifacts": { - "methods": [ - "list_artifacts" - ] - }, - "ListContexts": { - "methods": [ - "list_contexts" - ] - }, - "ListExecutions": { - "methods": [ - "list_executions" - ] - }, - "ListMetadataSchemas": { - "methods": [ - "list_metadata_schemas" - ] - }, - "ListMetadataStores": { - "methods": [ - "list_metadata_stores" - ] - }, - "PurgeArtifacts": { - "methods": [ - "purge_artifacts" - ] - }, - "PurgeContexts": { - "methods": [ - "purge_contexts" - ] - }, - "PurgeExecutions": { - "methods": [ - "purge_executions" - ] - }, - "QueryArtifactLineageSubgraph": { - "methods": [ - "query_artifact_lineage_subgraph" - ] - }, - "QueryContextLineageSubgraph": { - "methods": [ - "query_context_lineage_subgraph" - ] - }, - "QueryExecutionInputsAndOutputs": { - "methods": [ - "query_execution_inputs_and_outputs" - ] - }, - 
"UpdateArtifact": { - "methods": [ - "update_artifact" - ] - }, - "UpdateContext": { - "methods": [ - "update_context" - ] - }, - "UpdateExecution": { - "methods": [ - "update_execution" - ] - } - } - } - } - }, - "MigrationService": { - "clients": { - "grpc": { - "libraryClient": "MigrationServiceClient", - "rpcs": { - "BatchMigrateResources": { - "methods": [ - "batch_migrate_resources" - ] - }, - "SearchMigratableResources": { - "methods": [ - "search_migratable_resources" - ] - } - } - }, - "grpc-async": { - "libraryClient": "MigrationServiceAsyncClient", - "rpcs": { - "BatchMigrateResources": { - "methods": [ - "batch_migrate_resources" - ] - }, - "SearchMigratableResources": { - "methods": [ - "search_migratable_resources" - ] - } - } - } - } - }, - "ModelService": { - "clients": { - "grpc": { - "libraryClient": "ModelServiceClient", - "rpcs": { - "DeleteModel": { - "methods": [ - "delete_model" - ] - }, - "ExportModel": { - "methods": [ - "export_model" - ] - }, - "GetModel": { - "methods": [ - "get_model" - ] - }, - "GetModelEvaluation": { - "methods": [ - "get_model_evaluation" - ] - }, - "GetModelEvaluationSlice": { - "methods": [ - "get_model_evaluation_slice" - ] - }, - "ListModelEvaluationSlices": { - "methods": [ - "list_model_evaluation_slices" - ] - }, - "ListModelEvaluations": { - "methods": [ - "list_model_evaluations" - ] - }, - "ListModels": { - "methods": [ - "list_models" - ] - }, - "UpdateModel": { - "methods": [ - "update_model" - ] - }, - "UploadModel": { - "methods": [ - "upload_model" - ] - } - } - }, - "grpc-async": { - "libraryClient": "ModelServiceAsyncClient", - "rpcs": { - "DeleteModel": { - "methods": [ - "delete_model" - ] - }, - "ExportModel": { - "methods": [ - "export_model" - ] - }, - "GetModel": { - "methods": [ - "get_model" - ] - }, - "GetModelEvaluation": { - "methods": [ - "get_model_evaluation" - ] - }, - "GetModelEvaluationSlice": { - "methods": [ - "get_model_evaluation_slice" - ] - }, - "ListModelEvaluationSlices": { - 
"methods": [ - "list_model_evaluation_slices" - ] - }, - "ListModelEvaluations": { - "methods": [ - "list_model_evaluations" - ] - }, - "ListModels": { - "methods": [ - "list_models" - ] - }, - "UpdateModel": { - "methods": [ - "update_model" - ] - }, - "UploadModel": { - "methods": [ - "upload_model" - ] - } - } - } - } - }, - "PipelineService": { - "clients": { - "grpc": { - "libraryClient": "PipelineServiceClient", - "rpcs": { - "CancelPipelineJob": { - "methods": [ - "cancel_pipeline_job" - ] - }, - "CancelTrainingPipeline": { - "methods": [ - "cancel_training_pipeline" - ] - }, - "CreatePipelineJob": { - "methods": [ - "create_pipeline_job" - ] - }, - "CreateTrainingPipeline": { - "methods": [ - "create_training_pipeline" - ] - }, - "DeletePipelineJob": { - "methods": [ - "delete_pipeline_job" - ] - }, - "DeleteTrainingPipeline": { - "methods": [ - "delete_training_pipeline" - ] - }, - "GetPipelineJob": { - "methods": [ - "get_pipeline_job" - ] - }, - "GetTrainingPipeline": { - "methods": [ - "get_training_pipeline" - ] - }, - "ListPipelineJobs": { - "methods": [ - "list_pipeline_jobs" - ] - }, - "ListTrainingPipelines": { - "methods": [ - "list_training_pipelines" - ] - } - } - }, - "grpc-async": { - "libraryClient": "PipelineServiceAsyncClient", - "rpcs": { - "CancelPipelineJob": { - "methods": [ - "cancel_pipeline_job" - ] - }, - "CancelTrainingPipeline": { - "methods": [ - "cancel_training_pipeline" - ] - }, - "CreatePipelineJob": { - "methods": [ - "create_pipeline_job" - ] - }, - "CreateTrainingPipeline": { - "methods": [ - "create_training_pipeline" - ] - }, - "DeletePipelineJob": { - "methods": [ - "delete_pipeline_job" - ] - }, - "DeleteTrainingPipeline": { - "methods": [ - "delete_training_pipeline" - ] - }, - "GetPipelineJob": { - "methods": [ - "get_pipeline_job" - ] - }, - "GetTrainingPipeline": { - "methods": [ - "get_training_pipeline" - ] - }, - "ListPipelineJobs": { - "methods": [ - "list_pipeline_jobs" - ] - }, - "ListTrainingPipelines": { - 
"methods": [ - "list_training_pipelines" - ] - } - } - } - } - }, - "PredictionService": { - "clients": { - "grpc": { - "libraryClient": "PredictionServiceClient", - "rpcs": { - "Explain": { - "methods": [ - "explain" - ] - }, - "Predict": { - "methods": [ - "predict" - ] - }, - "RawPredict": { - "methods": [ - "raw_predict" - ] - } - } - }, - "grpc-async": { - "libraryClient": "PredictionServiceAsyncClient", - "rpcs": { - "Explain": { - "methods": [ - "explain" - ] - }, - "Predict": { - "methods": [ - "predict" - ] - }, - "RawPredict": { - "methods": [ - "raw_predict" - ] - } - } - } - } - }, - "SpecialistPoolService": { - "clients": { - "grpc": { - "libraryClient": "SpecialistPoolServiceClient", - "rpcs": { - "CreateSpecialistPool": { - "methods": [ - "create_specialist_pool" - ] - }, - "DeleteSpecialistPool": { - "methods": [ - "delete_specialist_pool" - ] - }, - "GetSpecialistPool": { - "methods": [ - "get_specialist_pool" - ] - }, - "ListSpecialistPools": { - "methods": [ - "list_specialist_pools" - ] - }, - "UpdateSpecialistPool": { - "methods": [ - "update_specialist_pool" - ] - } - } - }, - "grpc-async": { - "libraryClient": "SpecialistPoolServiceAsyncClient", - "rpcs": { - "CreateSpecialistPool": { - "methods": [ - "create_specialist_pool" - ] - }, - "DeleteSpecialistPool": { - "methods": [ - "delete_specialist_pool" - ] - }, - "GetSpecialistPool": { - "methods": [ - "get_specialist_pool" - ] - }, - "ListSpecialistPools": { - "methods": [ - "list_specialist_pools" - ] - }, - "UpdateSpecialistPool": { - "methods": [ - "update_specialist_pool" - ] - } - } - } - } - }, - "TensorboardService": { - "clients": { - "grpc": { - "libraryClient": "TensorboardServiceClient", - "rpcs": { - "BatchCreateTensorboardRuns": { - "methods": [ - "batch_create_tensorboard_runs" - ] - }, - "BatchCreateTensorboardTimeSeries": { - "methods": [ - "batch_create_tensorboard_time_series" - ] - }, - "BatchReadTensorboardTimeSeriesData": { - "methods": [ - 
"batch_read_tensorboard_time_series_data" - ] - }, - "CreateTensorboard": { - "methods": [ - "create_tensorboard" - ] - }, - "CreateTensorboardExperiment": { - "methods": [ - "create_tensorboard_experiment" - ] - }, - "CreateTensorboardRun": { - "methods": [ - "create_tensorboard_run" - ] - }, - "CreateTensorboardTimeSeries": { - "methods": [ - "create_tensorboard_time_series" - ] - }, - "DeleteTensorboard": { - "methods": [ - "delete_tensorboard" - ] - }, - "DeleteTensorboardExperiment": { - "methods": [ - "delete_tensorboard_experiment" - ] - }, - "DeleteTensorboardRun": { - "methods": [ - "delete_tensorboard_run" - ] - }, - "DeleteTensorboardTimeSeries": { - "methods": [ - "delete_tensorboard_time_series" - ] - }, - "ExportTensorboardTimeSeriesData": { - "methods": [ - "export_tensorboard_time_series_data" - ] - }, - "GetTensorboard": { - "methods": [ - "get_tensorboard" - ] - }, - "GetTensorboardExperiment": { - "methods": [ - "get_tensorboard_experiment" - ] - }, - "GetTensorboardRun": { - "methods": [ - "get_tensorboard_run" - ] - }, - "GetTensorboardTimeSeries": { - "methods": [ - "get_tensorboard_time_series" - ] - }, - "ListTensorboardExperiments": { - "methods": [ - "list_tensorboard_experiments" - ] - }, - "ListTensorboardRuns": { - "methods": [ - "list_tensorboard_runs" - ] - }, - "ListTensorboardTimeSeries": { - "methods": [ - "list_tensorboard_time_series" - ] - }, - "ListTensorboards": { - "methods": [ - "list_tensorboards" - ] - }, - "ReadTensorboardBlobData": { - "methods": [ - "read_tensorboard_blob_data" - ] - }, - "ReadTensorboardTimeSeriesData": { - "methods": [ - "read_tensorboard_time_series_data" - ] - }, - "UpdateTensorboard": { - "methods": [ - "update_tensorboard" - ] - }, - "UpdateTensorboardExperiment": { - "methods": [ - "update_tensorboard_experiment" - ] - }, - "UpdateTensorboardRun": { - "methods": [ - "update_tensorboard_run" - ] - }, - "UpdateTensorboardTimeSeries": { - "methods": [ - "update_tensorboard_time_series" - ] - }, - 
"WriteTensorboardExperimentData": { - "methods": [ - "write_tensorboard_experiment_data" - ] - }, - "WriteTensorboardRunData": { - "methods": [ - "write_tensorboard_run_data" - ] - } - } - }, - "grpc-async": { - "libraryClient": "TensorboardServiceAsyncClient", - "rpcs": { - "BatchCreateTensorboardRuns": { - "methods": [ - "batch_create_tensorboard_runs" - ] - }, - "BatchCreateTensorboardTimeSeries": { - "methods": [ - "batch_create_tensorboard_time_series" - ] - }, - "BatchReadTensorboardTimeSeriesData": { - "methods": [ - "batch_read_tensorboard_time_series_data" - ] - }, - "CreateTensorboard": { - "methods": [ - "create_tensorboard" - ] - }, - "CreateTensorboardExperiment": { - "methods": [ - "create_tensorboard_experiment" - ] - }, - "CreateTensorboardRun": { - "methods": [ - "create_tensorboard_run" - ] - }, - "CreateTensorboardTimeSeries": { - "methods": [ - "create_tensorboard_time_series" - ] - }, - "DeleteTensorboard": { - "methods": [ - "delete_tensorboard" - ] - }, - "DeleteTensorboardExperiment": { - "methods": [ - "delete_tensorboard_experiment" - ] - }, - "DeleteTensorboardRun": { - "methods": [ - "delete_tensorboard_run" - ] - }, - "DeleteTensorboardTimeSeries": { - "methods": [ - "delete_tensorboard_time_series" - ] - }, - "ExportTensorboardTimeSeriesData": { - "methods": [ - "export_tensorboard_time_series_data" - ] - }, - "GetTensorboard": { - "methods": [ - "get_tensorboard" - ] - }, - "GetTensorboardExperiment": { - "methods": [ - "get_tensorboard_experiment" - ] - }, - "GetTensorboardRun": { - "methods": [ - "get_tensorboard_run" - ] - }, - "GetTensorboardTimeSeries": { - "methods": [ - "get_tensorboard_time_series" - ] - }, - "ListTensorboardExperiments": { - "methods": [ - "list_tensorboard_experiments" - ] - }, - "ListTensorboardRuns": { - "methods": [ - "list_tensorboard_runs" - ] - }, - "ListTensorboardTimeSeries": { - "methods": [ - "list_tensorboard_time_series" - ] - }, - "ListTensorboards": { - "methods": [ - "list_tensorboards" - ] - 
}, - "ReadTensorboardBlobData": { - "methods": [ - "read_tensorboard_blob_data" - ] - }, - "ReadTensorboardTimeSeriesData": { - "methods": [ - "read_tensorboard_time_series_data" - ] - }, - "UpdateTensorboard": { - "methods": [ - "update_tensorboard" - ] - }, - "UpdateTensorboardExperiment": { - "methods": [ - "update_tensorboard_experiment" - ] - }, - "UpdateTensorboardRun": { - "methods": [ - "update_tensorboard_run" - ] - }, - "UpdateTensorboardTimeSeries": { - "methods": [ - "update_tensorboard_time_series" - ] - }, - "WriteTensorboardExperimentData": { - "methods": [ - "write_tensorboard_experiment_data" - ] - }, - "WriteTensorboardRunData": { - "methods": [ - "write_tensorboard_run_data" - ] - } - } - } - } - }, - "VizierService": { - "clients": { - "grpc": { - "libraryClient": "VizierServiceClient", - "rpcs": { - "AddTrialMeasurement": { - "methods": [ - "add_trial_measurement" - ] - }, - "CheckTrialEarlyStoppingState": { - "methods": [ - "check_trial_early_stopping_state" - ] - }, - "CompleteTrial": { - "methods": [ - "complete_trial" - ] - }, - "CreateStudy": { - "methods": [ - "create_study" - ] - }, - "CreateTrial": { - "methods": [ - "create_trial" - ] - }, - "DeleteStudy": { - "methods": [ - "delete_study" - ] - }, - "DeleteTrial": { - "methods": [ - "delete_trial" - ] - }, - "GetStudy": { - "methods": [ - "get_study" - ] - }, - "GetTrial": { - "methods": [ - "get_trial" - ] - }, - "ListOptimalTrials": { - "methods": [ - "list_optimal_trials" - ] - }, - "ListStudies": { - "methods": [ - "list_studies" - ] - }, - "ListTrials": { - "methods": [ - "list_trials" - ] - }, - "LookupStudy": { - "methods": [ - "lookup_study" - ] - }, - "StopTrial": { - "methods": [ - "stop_trial" - ] - }, - "SuggestTrials": { - "methods": [ - "suggest_trials" - ] - } - } - }, - "grpc-async": { - "libraryClient": "VizierServiceAsyncClient", - "rpcs": { - "AddTrialMeasurement": { - "methods": [ - "add_trial_measurement" - ] - }, - "CheckTrialEarlyStoppingState": { - "methods": [ 
- "check_trial_early_stopping_state" - ] - }, - "CompleteTrial": { - "methods": [ - "complete_trial" - ] - }, - "CreateStudy": { - "methods": [ - "create_study" - ] - }, - "CreateTrial": { - "methods": [ - "create_trial" - ] - }, - "DeleteStudy": { - "methods": [ - "delete_study" - ] - }, - "DeleteTrial": { - "methods": [ - "delete_trial" - ] - }, - "GetStudy": { - "methods": [ - "get_study" - ] - }, - "GetTrial": { - "methods": [ - "get_trial" - ] - }, - "ListOptimalTrials": { - "methods": [ - "list_optimal_trials" - ] - }, - "ListStudies": { - "methods": [ - "list_studies" - ] - }, - "ListTrials": { - "methods": [ - "list_trials" - ] - }, - "LookupStudy": { - "methods": [ - "lookup_study" - ] - }, - "StopTrial": { - "methods": [ - "stop_trial" - ] - }, - "SuggestTrials": { - "methods": [ - "suggest_trials" - ] - } - } - } - } - } - } -} diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/py.typed b/owl-bot-staging/v1/google/cloud/aiplatform_v1/py.typed deleted file mode 100644 index 228f1c51c6..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/__init__.py deleted file mode 100644 index 4de65971c2..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/__init__.py deleted file mode 100644 index 44e8fb2115..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import DatasetServiceClient -from .async_client import DatasetServiceAsyncClient - -__all__ = ( - 'DatasetServiceClient', - 'DatasetServiceAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/async_client.py deleted file mode 100644 index 55ec5bb55b..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/async_client.py +++ /dev/null @@ -1,1083 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.dataset_service import pagers -from google.cloud.aiplatform_v1.types import annotation -from google.cloud.aiplatform_v1.types import annotation_spec -from google.cloud.aiplatform_v1.types import data_item -from google.cloud.aiplatform_v1.types import dataset -from google.cloud.aiplatform_v1.types import dataset as gca_dataset -from google.cloud.aiplatform_v1.types import dataset_service -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import DatasetServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import DatasetServiceGrpcAsyncIOTransport -from .client import DatasetServiceClient - - -class DatasetServiceAsyncClient: - """The service that handles the CRUD of Vertex AI Dataset and - its child resources. 
- """ - - _client: DatasetServiceClient - - DEFAULT_ENDPOINT = DatasetServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = DatasetServiceClient.DEFAULT_MTLS_ENDPOINT - - annotation_path = staticmethod(DatasetServiceClient.annotation_path) - parse_annotation_path = staticmethod(DatasetServiceClient.parse_annotation_path) - annotation_spec_path = staticmethod(DatasetServiceClient.annotation_spec_path) - parse_annotation_spec_path = staticmethod(DatasetServiceClient.parse_annotation_spec_path) - data_item_path = staticmethod(DatasetServiceClient.data_item_path) - parse_data_item_path = staticmethod(DatasetServiceClient.parse_data_item_path) - dataset_path = staticmethod(DatasetServiceClient.dataset_path) - parse_dataset_path = staticmethod(DatasetServiceClient.parse_dataset_path) - common_billing_account_path = staticmethod(DatasetServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(DatasetServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(DatasetServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(DatasetServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(DatasetServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(DatasetServiceClient.parse_common_organization_path) - common_project_path = staticmethod(DatasetServiceClient.common_project_path) - parse_common_project_path = staticmethod(DatasetServiceClient.parse_common_project_path) - common_location_path = staticmethod(DatasetServiceClient.common_location_path) - parse_common_location_path = staticmethod(DatasetServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. 
- kwargs: Additional arguments to pass to the constructor. - - Returns: - DatasetServiceAsyncClient: The constructed client. - """ - return DatasetServiceClient.from_service_account_info.__func__(DatasetServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - DatasetServiceAsyncClient: The constructed client. - """ - return DatasetServiceClient.from_service_account_file.__func__(DatasetServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> DatasetServiceTransport: - """Returns the transport used by the client instance. - - Returns: - DatasetServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(DatasetServiceClient).get_transport_class, type(DatasetServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, DatasetServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the dataset service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.DatasetServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. 
- client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = DatasetServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_dataset(self, - request: Union[dataset_service.CreateDatasetRequest, dict] = None, - *, - parent: str = None, - dataset: gca_dataset.Dataset = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a Dataset. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateDatasetRequest, dict]): - The request object. Request message for - [DatasetService.CreateDataset][google.cloud.aiplatform.v1.DatasetService.CreateDataset]. - parent (:class:`str`): - Required. The resource name of the Location to create - the Dataset in. 
Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - dataset (:class:`google.cloud.aiplatform_v1.types.Dataset`): - Required. The Dataset to create. - This corresponds to the ``dataset`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.Dataset` A - collection of DataItems and Annotations on them. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, dataset]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.CreateDatasetRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if dataset is not None: - request.dataset = dataset - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_dataset, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_dataset.Dataset, - metadata_type=dataset_service.CreateDatasetOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_dataset(self, - request: Union[dataset_service.GetDatasetRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> dataset.Dataset: - r"""Gets a Dataset. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetDatasetRequest, dict]): - The request object. Request message for - [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. - name (:class:`str`): - Required. The name of the Dataset - resource. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Dataset: - A collection of DataItems and - Annotations on them. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.GetDatasetRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_dataset, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_dataset(self, - request: Union[dataset_service.UpdateDatasetRequest, dict] = None, - *, - dataset: gca_dataset.Dataset = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_dataset.Dataset: - r"""Updates a Dataset. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateDatasetRequest, dict]): - The request object. Request message for - [DatasetService.UpdateDataset][google.cloud.aiplatform.v1.DatasetService.UpdateDataset]. - dataset (:class:`google.cloud.aiplatform_v1.types.Dataset`): - Required. The Dataset which replaces - the resource on the server. - - This corresponds to the ``dataset`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The update mask applies to the resource. 
For - the ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - Updatable fields: - - - ``display_name`` - - ``description`` - - ``labels`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Dataset: - A collection of DataItems and - Annotations on them. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([dataset, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.UpdateDatasetRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if dataset is not None: - request.dataset = dataset - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_dataset, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("dataset.name", request.dataset.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def list_datasets(self, - request: Union[dataset_service.ListDatasetsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDatasetsAsyncPager: - r"""Lists Datasets in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListDatasetsRequest, dict]): - The request object. Request message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. - parent (:class:`str`): - Required. The name of the Dataset's parent resource. - Format: ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDatasetsAsyncPager: - Response message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.ListDatasetsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_datasets, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListDatasetsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_dataset(self, - request: Union[dataset_service.DeleteDatasetRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a Dataset. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteDatasetRequest, dict]): - The request object. Request message for - [DatasetService.DeleteDataset][google.cloud.aiplatform.v1.DatasetService.DeleteDataset]. - name (:class:`str`): - Required. The resource name of the Dataset to delete. - Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.DeleteDatasetRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_dataset, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
- return response - - async def import_data(self, - request: Union[dataset_service.ImportDataRequest, dict] = None, - *, - name: str = None, - import_configs: Sequence[dataset.ImportDataConfig] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Imports data into a Dataset. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ImportDataRequest, dict]): - The request object. Request message for - [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. - name (:class:`str`): - Required. The name of the Dataset resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - import_configs (:class:`Sequence[google.cloud.aiplatform_v1.types.ImportDataConfig]`): - Required. The desired input - locations. The contents of all input - locations will be imported in one batch. - - This corresponds to the ``import_configs`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.ImportDataResponse` - Response message for - [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name, import_configs]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.ImportDataRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if import_configs: - request.import_configs.extend(import_configs) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.import_data, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - dataset_service.ImportDataResponse, - metadata_type=dataset_service.ImportDataOperationMetadata, - ) - - # Done; return the response. - return response - - async def export_data(self, - request: Union[dataset_service.ExportDataRequest, dict] = None, - *, - name: str = None, - export_config: dataset.ExportDataConfig = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Exports data from a Dataset. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ExportDataRequest, dict]): - The request object. Request message for - [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. - name (:class:`str`): - Required. 
The name of the Dataset resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - export_config (:class:`google.cloud.aiplatform_v1.types.ExportDataConfig`): - Required. The desired output - location. - - This corresponds to the ``export_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.ExportDataResponse` - Response message for - [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, export_config]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.ExportDataRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if export_config is not None: - request.export_config = export_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.export_data, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - dataset_service.ExportDataResponse, - metadata_type=dataset_service.ExportDataOperationMetadata, - ) - - # Done; return the response. - return response - - async def list_data_items(self, - request: Union[dataset_service.ListDataItemsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDataItemsAsyncPager: - r"""Lists DataItems in a Dataset. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListDataItemsRequest, dict]): - The request object. Request message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. - parent (:class:`str`): - Required. The resource name of the Dataset to list - DataItems from. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDataItemsAsyncPager: - Response message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.ListDataItemsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_data_items, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListDataItemsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def get_annotation_spec(self, - request: Union[dataset_service.GetAnnotationSpecRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> annotation_spec.AnnotationSpec: - r"""Gets an AnnotationSpec. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetAnnotationSpecRequest, dict]): - The request object. Request message for - [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1.DatasetService.GetAnnotationSpec]. - name (:class:`str`): - Required. The name of the AnnotationSpec resource. - Format: - ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.AnnotationSpec: - Identifies a concept with which - DataItems may be annotated with. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.GetAnnotationSpecRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_annotation_spec, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_annotations(self, - request: Union[dataset_service.ListAnnotationsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListAnnotationsAsyncPager: - r"""Lists Annotations belongs to a dataitem - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListAnnotationsRequest, dict]): - The request object. Request message for - [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. - parent (:class:`str`): - Required. The resource name of the DataItem to list - Annotations from. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.dataset_service.pagers.ListAnnotationsAsyncPager: - Response message for - [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. - - Iterating over this object will yield results and - resolve additional pages automatically. 
- - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.ListAnnotationsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_annotations, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListAnnotationsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "DatasetServiceAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/client.py deleted file mode 100644 index 8d743c8e05..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/client.py +++ /dev/null @@ -1,1308 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.dataset_service import pagers -from google.cloud.aiplatform_v1.types import annotation -from google.cloud.aiplatform_v1.types import annotation_spec -from google.cloud.aiplatform_v1.types import data_item -from google.cloud.aiplatform_v1.types import dataset -from google.cloud.aiplatform_v1.types import dataset as gca_dataset -from google.cloud.aiplatform_v1.types import dataset_service -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import DatasetServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import DatasetServiceGrpcTransport -from .transports.grpc_asyncio import 
DatasetServiceGrpcAsyncIOTransport - - -class DatasetServiceClientMeta(type): - """Metaclass for the DatasetService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[DatasetServiceTransport]] - _transport_registry["grpc"] = DatasetServiceGrpcTransport - _transport_registry["grpc_asyncio"] = DatasetServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[DatasetServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class DatasetServiceClient(metaclass=DatasetServiceClientMeta): - """The service that handles the CRUD of Vertex AI Dataset and - its child resources. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
- ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - DatasetServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - DatasetServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> DatasetServiceTransport: - """Returns the transport used by the client instance. - - Returns: - DatasetServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def annotation_path(project: str,location: str,dataset: str,data_item: str,annotation: str,) -> str: - """Returns a fully-qualified annotation string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}/annotations/{annotation}".format(project=project, location=location, dataset=dataset, data_item=data_item, annotation=annotation, ) - - @staticmethod - def parse_annotation_path(path: str) -> Dict[str,str]: - """Parses a annotation path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/dataItems/(?P.+?)/annotations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def annotation_spec_path(project: str,location: str,dataset: str,annotation_spec: str,) -> str: - """Returns a fully-qualified annotation_spec string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(project=project, location=location, dataset=dataset, annotation_spec=annotation_spec, ) - - @staticmethod - def parse_annotation_spec_path(path: str) -> Dict[str,str]: - """Parses a annotation_spec path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/annotationSpecs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def data_item_path(project: str,location: str,dataset: str,data_item: str,) -> str: - """Returns a fully-qualified data_item string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}".format(project=project, location=location, dataset=dataset, data_item=data_item, ) - - @staticmethod - def parse_data_item_path(path: str) -> Dict[str,str]: - """Parses a data_item path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/dataItems/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def 
dataset_path(project: str,location: str,dataset: str,) -> str: - """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - - @staticmethod - def parse_dataset_path(path: str) -> Dict[str,str]: - """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def 
parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, DatasetServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the dataset service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, DatasetServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. 
- if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, DatasetServiceTransport): - # transport is a DatasetServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def create_dataset(self, - request: Union[dataset_service.CreateDatasetRequest, dict] = None, - *, - parent: str = None, - dataset: gca_dataset.Dataset = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates a Dataset. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateDatasetRequest, dict]): - The request object. Request message for - [DatasetService.CreateDataset][google.cloud.aiplatform.v1.DatasetService.CreateDataset]. - parent (str): - Required. The resource name of the Location to create - the Dataset in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - dataset (google.cloud.aiplatform_v1.types.Dataset): - Required. The Dataset to create. - This corresponds to the ``dataset`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.Dataset` A - collection of DataItems and Annotations on them. 
- - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, dataset]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.CreateDatasetRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.CreateDatasetRequest): - request = dataset_service.CreateDatasetRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if dataset is not None: - request.dataset = dataset - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_dataset] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_dataset.Dataset, - metadata_type=dataset_service.CreateDatasetOperationMetadata, - ) - - # Done; return the response. - return response - - def get_dataset(self, - request: Union[dataset_service.GetDatasetRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> dataset.Dataset: - r"""Gets a Dataset. 
- - Args: - request (Union[google.cloud.aiplatform_v1.types.GetDatasetRequest, dict]): - The request object. Request message for - [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. - name (str): - Required. The name of the Dataset - resource. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Dataset: - A collection of DataItems and - Annotations on them. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.GetDatasetRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.GetDatasetRequest): - request = dataset_service.GetDatasetRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_dataset] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_dataset(self, - request: Union[dataset_service.UpdateDatasetRequest, dict] = None, - *, - dataset: gca_dataset.Dataset = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_dataset.Dataset: - r"""Updates a Dataset. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateDatasetRequest, dict]): - The request object. Request message for - [DatasetService.UpdateDataset][google.cloud.aiplatform.v1.DatasetService.UpdateDataset]. - dataset (google.cloud.aiplatform_v1.types.Dataset): - Required. The Dataset which replaces - the resource on the server. - - This corresponds to the ``dataset`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. For - the ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - Updatable fields: - - - ``display_name`` - - ``description`` - - ``labels`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Dataset: - A collection of DataItems and - Annotations on them. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([dataset, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.UpdateDatasetRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.UpdateDatasetRequest): - request = dataset_service.UpdateDatasetRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if dataset is not None: - request.dataset = dataset - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_dataset] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("dataset.name", request.dataset.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_datasets(self, - request: Union[dataset_service.ListDatasetsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDatasetsPager: - r"""Lists Datasets in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListDatasetsRequest, dict]): - The request object. Request message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. 
- parent (str): - Required. The name of the Dataset's parent resource. - Format: ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDatasetsPager: - Response message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.ListDatasetsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.ListDatasetsRequest): - request = dataset_service.ListDatasetsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_datasets] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListDatasetsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_dataset(self, - request: Union[dataset_service.DeleteDatasetRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a Dataset. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteDatasetRequest, dict]): - The request object. Request message for - [DatasetService.DeleteDataset][google.cloud.aiplatform.v1.DatasetService.DeleteDataset]. - name (str): - Required. The resource name of the Dataset to delete. - Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. 
For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.DeleteDatasetRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.DeleteDatasetRequest): - request = dataset_service.DeleteDatasetRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_dataset] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
- return response - - def import_data(self, - request: Union[dataset_service.ImportDataRequest, dict] = None, - *, - name: str = None, - import_configs: Sequence[dataset.ImportDataConfig] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Imports data into a Dataset. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ImportDataRequest, dict]): - The request object. Request message for - [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. - name (str): - Required. The name of the Dataset resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - import_configs (Sequence[google.cloud.aiplatform_v1.types.ImportDataConfig]): - Required. The desired input - locations. The contents of all input - locations will be imported in one batch. - - This corresponds to the ``import_configs`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.ImportDataResponse` - Response message for - [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name, import_configs]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.ImportDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.ImportDataRequest): - request = dataset_service.ImportDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if import_configs is not None: - request.import_configs = import_configs - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.import_data] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - dataset_service.ImportDataResponse, - metadata_type=dataset_service.ImportDataOperationMetadata, - ) - - # Done; return the response. - return response - - def export_data(self, - request: Union[dataset_service.ExportDataRequest, dict] = None, - *, - name: str = None, - export_config: dataset.ExportDataConfig = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Exports data from a Dataset. 
- - Args: - request (Union[google.cloud.aiplatform_v1.types.ExportDataRequest, dict]): - The request object. Request message for - [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. - name (str): - Required. The name of the Dataset resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - export_config (google.cloud.aiplatform_v1.types.ExportDataConfig): - Required. The desired output - location. - - This corresponds to the ``export_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.ExportDataResponse` - Response message for - [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, export_config]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.ExportDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, dataset_service.ExportDataRequest): - request = dataset_service.ExportDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if export_config is not None: - request.export_config = export_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.export_data] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - dataset_service.ExportDataResponse, - metadata_type=dataset_service.ExportDataOperationMetadata, - ) - - # Done; return the response. - return response - - def list_data_items(self, - request: Union[dataset_service.ListDataItemsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDataItemsPager: - r"""Lists DataItems in a Dataset. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListDataItemsRequest, dict]): - The request object. Request message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. - parent (str): - Required. The resource name of the Dataset to list - DataItems from. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.dataset_service.pagers.ListDataItemsPager: - Response message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.ListDataItemsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.ListDataItemsRequest): - request = dataset_service.ListDataItemsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_data_items] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListDataItemsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_annotation_spec(self, - request: Union[dataset_service.GetAnnotationSpecRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> annotation_spec.AnnotationSpec: - r"""Gets an AnnotationSpec. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetAnnotationSpecRequest, dict]): - The request object. Request message for - [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1.DatasetService.GetAnnotationSpec]. - name (str): - Required. The name of the AnnotationSpec resource. - Format: - ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.AnnotationSpec: - Identifies a concept with which - DataItems may be annotated with. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.GetAnnotationSpecRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.GetAnnotationSpecRequest): - request = dataset_service.GetAnnotationSpecRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_annotation_spec] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_annotations(self, - request: Union[dataset_service.ListAnnotationsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListAnnotationsPager: - r"""Lists Annotations belongs to a dataitem - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListAnnotationsRequest, dict]): - The request object. Request message for - [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. - parent (str): - Required. The resource name of the DataItem to list - Annotations from. 
Format: - ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.dataset_service.pagers.ListAnnotationsPager: - Response message for - [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.ListAnnotationsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.ListAnnotationsRequest): - request = dataset_service.ListAnnotationsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_annotations] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListAnnotationsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! - """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "DatasetServiceClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/pagers.py deleted file mode 100644 index 7ee563fc40..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/pagers.py +++ /dev/null @@ -1,387 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.aiplatform_v1.types import annotation -from google.cloud.aiplatform_v1.types import data_item -from google.cloud.aiplatform_v1.types import dataset -from google.cloud.aiplatform_v1.types import dataset_service - - -class ListDatasetsPager: - """A pager for iterating through ``list_datasets`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListDatasetsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``datasets`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListDatasets`` requests and continue to iterate - through the ``datasets`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListDatasetsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., dataset_service.ListDatasetsResponse], - request: dataset_service.ListDatasetsRequest, - response: dataset_service.ListDatasetsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListDatasetsRequest): - The initial request object. 
- response (google.cloud.aiplatform_v1.types.ListDatasetsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = dataset_service.ListDatasetsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[dataset_service.ListDatasetsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[dataset.Dataset]: - for page in self.pages: - yield from page.datasets - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListDatasetsAsyncPager: - """A pager for iterating through ``list_datasets`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListDatasetsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``datasets`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListDatasets`` requests and continue to iterate - through the ``datasets`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListDatasetsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[dataset_service.ListDatasetsResponse]], - request: dataset_service.ListDatasetsRequest, - response: dataset_service.ListDatasetsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. 
- - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListDatasetsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListDatasetsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = dataset_service.ListDatasetsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[dataset_service.ListDatasetsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[dataset.Dataset]: - async def async_generator(): - async for page in self.pages: - for response in page.datasets: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListDataItemsPager: - """A pager for iterating through ``list_data_items`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListDataItemsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``data_items`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListDataItems`` requests and continue to iterate - through the ``data_items`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListDataItemsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., dataset_service.ListDataItemsResponse], - request: dataset_service.ListDataItemsRequest, - response: dataset_service.ListDataItemsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListDataItemsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListDataItemsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = dataset_service.ListDataItemsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[dataset_service.ListDataItemsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[data_item.DataItem]: - for page in self.pages: - yield from page.data_items - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListDataItemsAsyncPager: - """A pager for iterating through ``list_data_items`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListDataItemsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``data_items`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListDataItems`` requests and continue to iterate - through the ``data_items`` field on the - corresponding responses. 
- - All the usual :class:`google.cloud.aiplatform_v1.types.ListDataItemsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[dataset_service.ListDataItemsResponse]], - request: dataset_service.ListDataItemsRequest, - response: dataset_service.ListDataItemsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListDataItemsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListDataItemsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = dataset_service.ListDataItemsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[dataset_service.ListDataItemsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[data_item.DataItem]: - async def async_generator(): - async for page in self.pages: - for response in page.data_items: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListAnnotationsPager: - """A pager for iterating through ``list_annotations`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListAnnotationsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``annotations`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListAnnotations`` requests and continue to iterate - through the ``annotations`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListAnnotationsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., dataset_service.ListAnnotationsResponse], - request: dataset_service.ListAnnotationsRequest, - response: dataset_service.ListAnnotationsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListAnnotationsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListAnnotationsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = dataset_service.ListAnnotationsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[dataset_service.ListAnnotationsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[annotation.Annotation]: - for page in self.pages: - yield from page.annotations - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListAnnotationsAsyncPager: - """A pager for iterating through ``list_annotations`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListAnnotationsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``annotations`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListAnnotations`` requests and continue to iterate - through the ``annotations`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListAnnotationsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[dataset_service.ListAnnotationsResponse]], - request: dataset_service.ListAnnotationsRequest, - response: dataset_service.ListAnnotationsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListAnnotationsRequest): - The initial request object. 
- response (google.cloud.aiplatform_v1.types.ListAnnotationsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = dataset_service.ListAnnotationsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[dataset_service.ListAnnotationsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[annotation.Annotation]: - async def async_generator(): - async for page in self.pages: - for response in page.annotations: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/__init__.py deleted file mode 100644 index 561b0c5cfd..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import DatasetServiceTransport -from .grpc import DatasetServiceGrpcTransport -from .grpc_asyncio import DatasetServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[DatasetServiceTransport]] -_transport_registry['grpc'] = DatasetServiceGrpcTransport -_transport_registry['grpc_asyncio'] = DatasetServiceGrpcAsyncIOTransport - -__all__ = ( - 'DatasetServiceTransport', - 'DatasetServiceGrpcTransport', - 'DatasetServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py deleted file mode 100644 index c0c5ac0002..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/base.py +++ /dev/null @@ -1,282 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1.types import annotation_spec -from google.cloud.aiplatform_v1.types import dataset -from google.cloud.aiplatform_v1.types import dataset as gca_dataset -from google.cloud.aiplatform_v1.types import dataset_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class DatasetServiceTransport(abc.ABC): - """Abstract transport class for DatasetService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. 
- if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.create_dataset: gapic_v1.method.wrap_method( - self.create_dataset, - default_timeout=None, - client_info=client_info, - ), - self.get_dataset: gapic_v1.method.wrap_method( - self.get_dataset, - default_timeout=None, - client_info=client_info, - ), - self.update_dataset: gapic_v1.method.wrap_method( - self.update_dataset, - default_timeout=None, - client_info=client_info, - ), - self.list_datasets: gapic_v1.method.wrap_method( - self.list_datasets, - default_timeout=None, - client_info=client_info, - ), - self.delete_dataset: gapic_v1.method.wrap_method( - self.delete_dataset, - default_timeout=None, - client_info=client_info, - ), - self.import_data: gapic_v1.method.wrap_method( - self.import_data, - default_timeout=None, - client_info=client_info, - ), - self.export_data: gapic_v1.method.wrap_method( - self.export_data, - default_timeout=None, - client_info=client_info, - ), - self.list_data_items: gapic_v1.method.wrap_method( - self.list_data_items, - default_timeout=None, - client_info=client_info, - ), - self.get_annotation_spec: gapic_v1.method.wrap_method( - self.get_annotation_spec, - default_timeout=None, - client_info=client_info, - ), - self.list_annotations: gapic_v1.method.wrap_method( - self.list_annotations, - default_timeout=None, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! 
- """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_dataset(self) -> Callable[ - [dataset_service.CreateDatasetRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_dataset(self) -> Callable[ - [dataset_service.GetDatasetRequest], - Union[ - dataset.Dataset, - Awaitable[dataset.Dataset] - ]]: - raise NotImplementedError() - - @property - def update_dataset(self) -> Callable[ - [dataset_service.UpdateDatasetRequest], - Union[ - gca_dataset.Dataset, - Awaitable[gca_dataset.Dataset] - ]]: - raise NotImplementedError() - - @property - def list_datasets(self) -> Callable[ - [dataset_service.ListDatasetsRequest], - Union[ - dataset_service.ListDatasetsResponse, - Awaitable[dataset_service.ListDatasetsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_dataset(self) -> Callable[ - [dataset_service.DeleteDatasetRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def import_data(self) -> Callable[ - [dataset_service.ImportDataRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def export_data(self) -> Callable[ - [dataset_service.ExportDataRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def list_data_items(self) -> Callable[ - [dataset_service.ListDataItemsRequest], - Union[ - dataset_service.ListDataItemsResponse, - Awaitable[dataset_service.ListDataItemsResponse] - ]]: - raise NotImplementedError() - - @property - def get_annotation_spec(self) -> Callable[ - [dataset_service.GetAnnotationSpecRequest], - Union[ - annotation_spec.AnnotationSpec, - 
Awaitable[annotation_spec.AnnotationSpec] - ]]: - raise NotImplementedError() - - @property - def list_annotations(self) -> Callable[ - [dataset_service.ListAnnotationsRequest], - Union[ - dataset_service.ListAnnotationsResponse, - Awaitable[dataset_service.ListAnnotationsResponse] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'DatasetServiceTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py deleted file mode 100644 index d015727921..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc.py +++ /dev/null @@ -1,511 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1.types import annotation_spec -from google.cloud.aiplatform_v1.types import dataset -from google.cloud.aiplatform_v1.types import dataset as gca_dataset -from google.cloud.aiplatform_v1.types import dataset_service -from google.longrunning import operations_pb2 # type: ignore -from .base import DatasetServiceTransport, DEFAULT_CLIENT_INFO - - -class DatasetServiceGrpcTransport(DatasetServiceTransport): - """gRPC backend transport for DatasetService. - - The service that handles the CRUD of Vertex AI Dataset and - its child resources. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. 
- - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. 
- Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_dataset(self) -> Callable[ - [dataset_service.CreateDatasetRequest], - operations_pb2.Operation]: - r"""Return a callable for the create dataset method over gRPC. - - Creates a Dataset. 
- - Returns: - Callable[[~.CreateDatasetRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_dataset' not in self._stubs: - self._stubs['create_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/CreateDataset', - request_serializer=dataset_service.CreateDatasetRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_dataset'] - - @property - def get_dataset(self) -> Callable[ - [dataset_service.GetDatasetRequest], - dataset.Dataset]: - r"""Return a callable for the get dataset method over gRPC. - - Gets a Dataset. - - Returns: - Callable[[~.GetDatasetRequest], - ~.Dataset]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_dataset' not in self._stubs: - self._stubs['get_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/GetDataset', - request_serializer=dataset_service.GetDatasetRequest.serialize, - response_deserializer=dataset.Dataset.deserialize, - ) - return self._stubs['get_dataset'] - - @property - def update_dataset(self) -> Callable[ - [dataset_service.UpdateDatasetRequest], - gca_dataset.Dataset]: - r"""Return a callable for the update dataset method over gRPC. - - Updates a Dataset. - - Returns: - Callable[[~.UpdateDatasetRequest], - ~.Dataset]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_dataset' not in self._stubs: - self._stubs['update_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/UpdateDataset', - request_serializer=dataset_service.UpdateDatasetRequest.serialize, - response_deserializer=gca_dataset.Dataset.deserialize, - ) - return self._stubs['update_dataset'] - - @property - def list_datasets(self) -> Callable[ - [dataset_service.ListDatasetsRequest], - dataset_service.ListDatasetsResponse]: - r"""Return a callable for the list datasets method over gRPC. - - Lists Datasets in a Location. - - Returns: - Callable[[~.ListDatasetsRequest], - ~.ListDatasetsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_datasets' not in self._stubs: - self._stubs['list_datasets'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/ListDatasets', - request_serializer=dataset_service.ListDatasetsRequest.serialize, - response_deserializer=dataset_service.ListDatasetsResponse.deserialize, - ) - return self._stubs['list_datasets'] - - @property - def delete_dataset(self) -> Callable[ - [dataset_service.DeleteDatasetRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete dataset method over gRPC. - - Deletes a Dataset. - - Returns: - Callable[[~.DeleteDatasetRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_dataset' not in self._stubs: - self._stubs['delete_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/DeleteDataset', - request_serializer=dataset_service.DeleteDatasetRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_dataset'] - - @property - def import_data(self) -> Callable[ - [dataset_service.ImportDataRequest], - operations_pb2.Operation]: - r"""Return a callable for the import data method over gRPC. - - Imports data into a Dataset. - - Returns: - Callable[[~.ImportDataRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'import_data' not in self._stubs: - self._stubs['import_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/ImportData', - request_serializer=dataset_service.ImportDataRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['import_data'] - - @property - def export_data(self) -> Callable[ - [dataset_service.ExportDataRequest], - operations_pb2.Operation]: - r"""Return a callable for the export data method over gRPC. - - Exports data from a Dataset. - - Returns: - Callable[[~.ExportDataRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'export_data' not in self._stubs: - self._stubs['export_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/ExportData', - request_serializer=dataset_service.ExportDataRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_data'] - - @property - def list_data_items(self) -> Callable[ - [dataset_service.ListDataItemsRequest], - dataset_service.ListDataItemsResponse]: - r"""Return a callable for the list data items method over gRPC. - - Lists DataItems in a Dataset. - - Returns: - Callable[[~.ListDataItemsRequest], - ~.ListDataItemsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_data_items' not in self._stubs: - self._stubs['list_data_items'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/ListDataItems', - request_serializer=dataset_service.ListDataItemsRequest.serialize, - response_deserializer=dataset_service.ListDataItemsResponse.deserialize, - ) - return self._stubs['list_data_items'] - - @property - def get_annotation_spec(self) -> Callable[ - [dataset_service.GetAnnotationSpecRequest], - annotation_spec.AnnotationSpec]: - r"""Return a callable for the get annotation spec method over gRPC. - - Gets an AnnotationSpec. - - Returns: - Callable[[~.GetAnnotationSpecRequest], - ~.AnnotationSpec]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_annotation_spec' not in self._stubs: - self._stubs['get_annotation_spec'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/GetAnnotationSpec', - request_serializer=dataset_service.GetAnnotationSpecRequest.serialize, - response_deserializer=annotation_spec.AnnotationSpec.deserialize, - ) - return self._stubs['get_annotation_spec'] - - @property - def list_annotations(self) -> Callable[ - [dataset_service.ListAnnotationsRequest], - dataset_service.ListAnnotationsResponse]: - r"""Return a callable for the list annotations method over gRPC. - - Lists Annotations belongs to a dataitem - - Returns: - Callable[[~.ListAnnotationsRequest], - ~.ListAnnotationsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_annotations' not in self._stubs: - self._stubs['list_annotations'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/ListAnnotations', - request_serializer=dataset_service.ListAnnotationsRequest.serialize, - response_deserializer=dataset_service.ListAnnotationsResponse.deserialize, - ) - return self._stubs['list_annotations'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'DatasetServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py deleted file mode 100644 index dd76aab381..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/dataset_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,515 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use 
this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1.types import annotation_spec -from google.cloud.aiplatform_v1.types import dataset -from google.cloud.aiplatform_v1.types import dataset as gca_dataset -from google.cloud.aiplatform_v1.types import dataset_service -from google.longrunning import operations_pb2 # type: ignore -from .base import DatasetServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import DatasetServiceGrpcTransport - - -class DatasetServiceGrpcAsyncIOTransport(DatasetServiceTransport): - """gRPC AsyncIO backend transport for DatasetService. - - The service that handles the CRUD of Vertex AI Dataset and - its child resources. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. 
- """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
- If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. 
This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_dataset(self) -> Callable[ - [dataset_service.CreateDatasetRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create dataset method over gRPC. - - Creates a Dataset. - - Returns: - Callable[[~.CreateDatasetRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_dataset' not in self._stubs: - self._stubs['create_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/CreateDataset', - request_serializer=dataset_service.CreateDatasetRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_dataset'] - - @property - def get_dataset(self) -> Callable[ - [dataset_service.GetDatasetRequest], - Awaitable[dataset.Dataset]]: - r"""Return a callable for the get dataset method over gRPC. - - Gets a Dataset. 
- - Returns: - Callable[[~.GetDatasetRequest], - Awaitable[~.Dataset]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_dataset' not in self._stubs: - self._stubs['get_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/GetDataset', - request_serializer=dataset_service.GetDatasetRequest.serialize, - response_deserializer=dataset.Dataset.deserialize, - ) - return self._stubs['get_dataset'] - - @property - def update_dataset(self) -> Callable[ - [dataset_service.UpdateDatasetRequest], - Awaitable[gca_dataset.Dataset]]: - r"""Return a callable for the update dataset method over gRPC. - - Updates a Dataset. - - Returns: - Callable[[~.UpdateDatasetRequest], - Awaitable[~.Dataset]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_dataset' not in self._stubs: - self._stubs['update_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/UpdateDataset', - request_serializer=dataset_service.UpdateDatasetRequest.serialize, - response_deserializer=gca_dataset.Dataset.deserialize, - ) - return self._stubs['update_dataset'] - - @property - def list_datasets(self) -> Callable[ - [dataset_service.ListDatasetsRequest], - Awaitable[dataset_service.ListDatasetsResponse]]: - r"""Return a callable for the list datasets method over gRPC. - - Lists Datasets in a Location. - - Returns: - Callable[[~.ListDatasetsRequest], - Awaitable[~.ListDatasetsResponse]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_datasets' not in self._stubs: - self._stubs['list_datasets'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/ListDatasets', - request_serializer=dataset_service.ListDatasetsRequest.serialize, - response_deserializer=dataset_service.ListDatasetsResponse.deserialize, - ) - return self._stubs['list_datasets'] - - @property - def delete_dataset(self) -> Callable[ - [dataset_service.DeleteDatasetRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete dataset method over gRPC. - - Deletes a Dataset. - - Returns: - Callable[[~.DeleteDatasetRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_dataset' not in self._stubs: - self._stubs['delete_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/DeleteDataset', - request_serializer=dataset_service.DeleteDatasetRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_dataset'] - - @property - def import_data(self) -> Callable[ - [dataset_service.ImportDataRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the import data method over gRPC. - - Imports data into a Dataset. - - Returns: - Callable[[~.ImportDataRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'import_data' not in self._stubs: - self._stubs['import_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/ImportData', - request_serializer=dataset_service.ImportDataRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['import_data'] - - @property - def export_data(self) -> Callable[ - [dataset_service.ExportDataRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the export data method over gRPC. - - Exports data from a Dataset. - - Returns: - Callable[[~.ExportDataRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'export_data' not in self._stubs: - self._stubs['export_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/ExportData', - request_serializer=dataset_service.ExportDataRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_data'] - - @property - def list_data_items(self) -> Callable[ - [dataset_service.ListDataItemsRequest], - Awaitable[dataset_service.ListDataItemsResponse]]: - r"""Return a callable for the list data items method over gRPC. - - Lists DataItems in a Dataset. - - Returns: - Callable[[~.ListDataItemsRequest], - Awaitable[~.ListDataItemsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_data_items' not in self._stubs: - self._stubs['list_data_items'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/ListDataItems', - request_serializer=dataset_service.ListDataItemsRequest.serialize, - response_deserializer=dataset_service.ListDataItemsResponse.deserialize, - ) - return self._stubs['list_data_items'] - - @property - def get_annotation_spec(self) -> Callable[ - [dataset_service.GetAnnotationSpecRequest], - Awaitable[annotation_spec.AnnotationSpec]]: - r"""Return a callable for the get annotation spec method over gRPC. - - Gets an AnnotationSpec. - - Returns: - Callable[[~.GetAnnotationSpecRequest], - Awaitable[~.AnnotationSpec]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_annotation_spec' not in self._stubs: - self._stubs['get_annotation_spec'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/GetAnnotationSpec', - request_serializer=dataset_service.GetAnnotationSpecRequest.serialize, - response_deserializer=annotation_spec.AnnotationSpec.deserialize, - ) - return self._stubs['get_annotation_spec'] - - @property - def list_annotations(self) -> Callable[ - [dataset_service.ListAnnotationsRequest], - Awaitable[dataset_service.ListAnnotationsResponse]]: - r"""Return a callable for the list annotations method over gRPC. - - Lists Annotations belongs to a dataitem - - Returns: - Callable[[~.ListAnnotationsRequest], - Awaitable[~.ListAnnotationsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_annotations' not in self._stubs: - self._stubs['list_annotations'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.DatasetService/ListAnnotations', - request_serializer=dataset_service.ListAnnotationsRequest.serialize, - response_deserializer=dataset_service.ListAnnotationsResponse.deserialize, - ) - return self._stubs['list_annotations'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'DatasetServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/__init__.py deleted file mode 100644 index 7db43e768e..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import EndpointServiceClient -from .async_client import EndpointServiceAsyncClient - -__all__ = ( - 'EndpointServiceClient', - 'EndpointServiceAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py deleted file mode 100644 index c98653eb3f..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/async_client.py +++ /dev/null @@ -1,889 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.endpoint_service import pagers -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import endpoint -from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint -from google.cloud.aiplatform_v1.types import endpoint_service -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import EndpointServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import EndpointServiceGrpcAsyncIOTransport -from .client import EndpointServiceClient - - -class EndpointServiceAsyncClient: - """A service for managing Vertex AI's Endpoints.""" - - _client: EndpointServiceClient - - DEFAULT_ENDPOINT = EndpointServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = EndpointServiceClient.DEFAULT_MTLS_ENDPOINT - - endpoint_path = staticmethod(EndpointServiceClient.endpoint_path) - parse_endpoint_path = staticmethod(EndpointServiceClient.parse_endpoint_path) - model_path = 
staticmethod(EndpointServiceClient.model_path) - parse_model_path = staticmethod(EndpointServiceClient.parse_model_path) - model_deployment_monitoring_job_path = staticmethod(EndpointServiceClient.model_deployment_monitoring_job_path) - parse_model_deployment_monitoring_job_path = staticmethod(EndpointServiceClient.parse_model_deployment_monitoring_job_path) - network_path = staticmethod(EndpointServiceClient.network_path) - parse_network_path = staticmethod(EndpointServiceClient.parse_network_path) - common_billing_account_path = staticmethod(EndpointServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(EndpointServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(EndpointServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(EndpointServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(EndpointServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(EndpointServiceClient.parse_common_organization_path) - common_project_path = staticmethod(EndpointServiceClient.common_project_path) - parse_common_project_path = staticmethod(EndpointServiceClient.parse_common_project_path) - common_location_path = staticmethod(EndpointServiceClient.common_location_path) - parse_common_location_path = staticmethod(EndpointServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - EndpointServiceAsyncClient: The constructed client. 
- """ - return EndpointServiceClient.from_service_account_info.__func__(EndpointServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - EndpointServiceAsyncClient: The constructed client. - """ - return EndpointServiceClient.from_service_account_file.__func__(EndpointServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> EndpointServiceTransport: - """Returns the transport used by the client instance. - - Returns: - EndpointServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(EndpointServiceClient).get_transport_class, type(EndpointServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, EndpointServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the endpoint service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.EndpointServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. 
    async def create_endpoint(self,
            request: Union[endpoint_service.CreateEndpointRequest, dict] = None,
            *,
            parent: str = None,
            endpoint: gca_endpoint.Endpoint = None,
            endpoint_id: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> operation_async.AsyncOperation:
        r"""Creates an Endpoint.

        Args:
            request (Union[google.cloud.aiplatform_v1.types.CreateEndpointRequest, dict]):
                The request object. Request message for
                [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1.EndpointService.CreateEndpoint].
            parent (:class:`str`):
                Required. The resource name of the Location to create
                the Endpoint in. Format:
                ``projects/{project}/locations/{location}``

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            endpoint (:class:`google.cloud.aiplatform_v1.types.Endpoint`):
                Required. The Endpoint to create.
                This corresponds to the ``endpoint`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            endpoint_id (:class:`str`):
                Immutable. The ID to use for endpoint, which will become
                the final component of the endpoint resource name. If
                not provided, Vertex AI will generate a value for this
                ID.

                This value should be 1-10 characters, and valid
                characters are /[0-9]/. When using HTTP/JSON, this field
                is populated based on a query string argument, such as
                ``?endpoint_id=12345``. This is the fallback for fields
                that are not included in either the URI or the body.

                This corresponds to the ``endpoint_id`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation_async.AsyncOperation:
                An object representing a long-running operation.

                The result type for the operation will be
                :class:`google.cloud.aiplatform_v1.types.Endpoint` Models are
                deployed into it, and afterwards Endpoint is called to obtain
                predictions and explanations.

        Raises:
            ValueError: If ``request`` is given together with any of the
                flattened fields (``parent``, ``endpoint``, ``endpoint_id``).
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent, endpoint, endpoint_id])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        # Coercion accepts a CreateEndpointRequest, a dict, or None.
        request = endpoint_service.CreateEndpointRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent
        if endpoint is not None:
            request.endpoint = endpoint
        if endpoint_id is not None:
            request.endpoint_id = endpoint_id

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.create_endpoint,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here (routes the call by the `parent` resource name).
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("parent", request.parent),
            )),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Wrap the response in an operation future so callers can await the
        # long-running CreateEndpoint operation and get a typed result.
        response = operation_async.from_gapic(
            response,
            self._client._transport.operations_client,
            gca_endpoint.Endpoint,
            metadata_type=endpoint_service.CreateEndpointOperationMetadata,
        )

        # Done; return the response.
        return response

    async def get_endpoint(self,
            request: Union[endpoint_service.GetEndpointRequest, dict] = None,
            *,
            name: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> endpoint.Endpoint:
        r"""Gets an Endpoint.

        Args:
            request (Union[google.cloud.aiplatform_v1.types.GetEndpointRequest, dict]):
                The request object. Request message for
                [EndpointService.GetEndpoint][google.cloud.aiplatform.v1.EndpointService.GetEndpoint]
            name (:class:`str`):
                Required. The name of the Endpoint resource. Format:
                ``projects/{project}/locations/{location}/endpoints/{endpoint}``

                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1.types.Endpoint:
                Models are deployed into it, and
                afterwards Endpoint is called to obtain
                predictions and explanations.

        Raises:
            ValueError: If ``request`` is given together with ``name``.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        request = endpoint_service.GetEndpointRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.get_endpoint,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here (routes the call by the `name` resource name).
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("name", request.name),
            )),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response
    async def list_endpoints(self,
            request: Union[endpoint_service.ListEndpointsRequest, dict] = None,
            *,
            parent: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> pagers.ListEndpointsAsyncPager:
        r"""Lists Endpoints in a Location.

        Args:
            request (Union[google.cloud.aiplatform_v1.types.ListEndpointsRequest, dict]):
                The request object. Request message for
                [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints].
            parent (:class:`str`):
                Required. The resource name of the Location from which
                to list the Endpoints. Format:
                ``projects/{project}/locations/{location}``

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1.services.endpoint_service.pagers.ListEndpointsAsyncPager:
                Response message for
                [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints].

                Iterating over this object will yield results and
                resolve additional pages automatically.

        Raises:
            ValueError: If ``request`` is given together with ``parent``.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        request = endpoint_service.ListEndpointsRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.list_endpoints,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here (routes the call by the `parent` resource name).
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("parent", request.parent),
            )),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # This method is paged; wrap the response in a pager, which provides
        # an `__aiter__` convenience method. The pager re-issues `rpc` with
        # the same metadata to fetch subsequent pages on demand.
        response = pagers.ListEndpointsAsyncPager(
            method=rpc,
            request=request,
            response=response,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    async def update_endpoint(self,
            request: Union[endpoint_service.UpdateEndpointRequest, dict] = None,
            *,
            endpoint: gca_endpoint.Endpoint = None,
            update_mask: field_mask_pb2.FieldMask = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> gca_endpoint.Endpoint:
        r"""Updates an Endpoint.

        Args:
            request (Union[google.cloud.aiplatform_v1.types.UpdateEndpointRequest, dict]):
                The request object. Request message for
                [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint].
            endpoint (:class:`google.cloud.aiplatform_v1.types.Endpoint`):
                Required. The Endpoint which replaces
                the resource on the server.

                This corresponds to the ``endpoint`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
                Required. The update mask applies to the resource. See
                [google.protobuf.FieldMask][google.protobuf.FieldMask].

                This corresponds to the ``update_mask`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1.types.Endpoint:
                Models are deployed into it, and
                afterwards Endpoint is called to obtain
                predictions and explanations.

        Raises:
            ValueError: If ``request`` is given together with any of the
                flattened fields (``endpoint``, ``update_mask``).
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([endpoint, update_mask])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        request = endpoint_service.UpdateEndpointRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if endpoint is not None:
            request.endpoint = endpoint
        if update_mask is not None:
            request.update_mask = update_mask

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.update_endpoint,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here (routes by the *nested* endpoint resource name).
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("endpoint.name", request.endpoint.name),
            )),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response
    async def delete_endpoint(self,
            request: Union[endpoint_service.DeleteEndpointRequest, dict] = None,
            *,
            name: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> operation_async.AsyncOperation:
        r"""Deletes an Endpoint.

        Args:
            request (Union[google.cloud.aiplatform_v1.types.DeleteEndpointRequest, dict]):
                The request object. Request message for
                [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1.EndpointService.DeleteEndpoint].
            name (:class:`str`):
                Required. The name of the Endpoint resource to be
                deleted. Format:
                ``projects/{project}/locations/{location}/endpoints/{endpoint}``

                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation_async.AsyncOperation:
                An object representing a long-running operation.

                The result type for the operation will be
                :class:`google.protobuf.empty_pb2.Empty` A generic empty
                message that you can re-use to avoid defining duplicated
                empty messages in your APIs. A typical example is to
                use it as the request or the response type of an API
                method. For instance:

                service Foo {
                rpc Bar(google.protobuf.Empty) returns
                (google.protobuf.Empty);

                }

                The JSON representation for Empty is empty JSON
                object {}.

        Raises:
            ValueError: If ``request`` is given together with ``name``.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        request = endpoint_service.DeleteEndpointRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.delete_endpoint,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here (routes the call by the `name` resource name).
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("name", request.name),
            )),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Wrap the response in an operation future; deletion resolves to
        # Empty, with DeleteOperationMetadata describing progress.
        response = operation_async.from_gapic(
            response,
            self._client._transport.operations_client,
            empty_pb2.Empty,
            metadata_type=gca_operation.DeleteOperationMetadata,
        )

        # Done; return the response.
        return response

    async def deploy_model(self,
            request: Union[endpoint_service.DeployModelRequest, dict] = None,
            *,
            endpoint: str = None,
            deployed_model: gca_endpoint.DeployedModel = None,
            traffic_split: Sequence[endpoint_service.DeployModelRequest.TrafficSplitEntry] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> operation_async.AsyncOperation:
        r"""Deploys a Model into this Endpoint, creating a
        DeployedModel within it.

        Args:
            request (Union[google.cloud.aiplatform_v1.types.DeployModelRequest, dict]):
                The request object. Request message for
                [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel].
            endpoint (:class:`str`):
                Required. The name of the Endpoint resource into which
                to deploy a Model. Format:
                ``projects/{project}/locations/{location}/endpoints/{endpoint}``

                This corresponds to the ``endpoint`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            deployed_model (:class:`google.cloud.aiplatform_v1.types.DeployedModel`):
                Required. The DeployedModel to be created within the
                Endpoint. Note that
                [Endpoint.traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
                must be updated for the DeployedModel to start receiving
                traffic, either as part of this call, or via
                [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint].

                This corresponds to the ``deployed_model`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            traffic_split (:class:`Sequence[google.cloud.aiplatform_v1.types.DeployModelRequest.TrafficSplitEntry]`):
                A map from a DeployedModel's ID to the percentage of
                this Endpoint's traffic that should be forwarded to that
                DeployedModel.

                If this field is non-empty, then the Endpoint's
                [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
                will be overwritten with it. To refer to the ID of the
                just being deployed Model, a "0" should be used, and the
                actual ID of the new DeployedModel will be filled in its
                place by this method. The traffic percentage values must
                add up to 100.

                If this field is empty, then the Endpoint's
                [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
                is not updated.

                This corresponds to the ``traffic_split`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation_async.AsyncOperation:
                An object representing a long-running operation.

                The result type for the operation will be
                :class:`google.cloud.aiplatform_v1.types.DeployModelResponse`
                Response message for
                [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel].

        Raises:
            ValueError: If ``request`` is given together with any of the
                flattened fields.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([endpoint, deployed_model, traffic_split])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        request = endpoint_service.DeployModelRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if endpoint is not None:
            request.endpoint = endpoint
        if deployed_model is not None:
            request.deployed_model = deployed_model

        # traffic_split is a proto map field: merge entries via update()
        # rather than assignment; an empty/None value leaves it untouched.
        if traffic_split:
            request.traffic_split.update(traffic_split)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.deploy_model,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here (routes the call by the `endpoint` resource name).
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("endpoint", request.endpoint),
            )),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Wrap the response in an operation future.
        response = operation_async.from_gapic(
            response,
            self._client._transport.operations_client,
            endpoint_service.DeployModelResponse,
            metadata_type=endpoint_service.DeployModelOperationMetadata,
        )

        # Done; return the response.
        return response
    async def undeploy_model(self,
            request: Union[endpoint_service.UndeployModelRequest, dict] = None,
            *,
            endpoint: str = None,
            deployed_model_id: str = None,
            traffic_split: Sequence[endpoint_service.UndeployModelRequest.TrafficSplitEntry] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> operation_async.AsyncOperation:
        r"""Undeploys a Model from an Endpoint, removing a
        DeployedModel from it, and freeing all resources it's
        using.

        Args:
            request (Union[google.cloud.aiplatform_v1.types.UndeployModelRequest, dict]):
                The request object. Request message for
                [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel].
            endpoint (:class:`str`):
                Required. The name of the Endpoint resource from which
                to undeploy a Model. Format:
                ``projects/{project}/locations/{location}/endpoints/{endpoint}``

                This corresponds to the ``endpoint`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            deployed_model_id (:class:`str`):
                Required. The ID of the DeployedModel
                to be undeployed from the Endpoint.

                This corresponds to the ``deployed_model_id`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            traffic_split (:class:`Sequence[google.cloud.aiplatform_v1.types.UndeployModelRequest.TrafficSplitEntry]`):
                If this field is provided, then the Endpoint's
                [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]
                will be overwritten with it. If last DeployedModel is
                being undeployed from the Endpoint, the
                [Endpoint.traffic_split] will always end up empty when
                this call returns. A DeployedModel will be successfully
                undeployed only if it doesn't have any traffic assigned
                to it when this method executes, or if this field
                unassigns any traffic to it.

                This corresponds to the ``traffic_split`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation_async.AsyncOperation:
                An object representing a long-running operation.

                The result type for the operation will be
                :class:`google.cloud.aiplatform_v1.types.UndeployModelResponse`
                Response message for
                [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel].

        Raises:
            ValueError: If ``request`` is given together with any of the
                flattened fields.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([endpoint, deployed_model_id, traffic_split])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        request = endpoint_service.UndeployModelRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if endpoint is not None:
            request.endpoint = endpoint
        if deployed_model_id is not None:
            request.deployed_model_id = deployed_model_id

        # traffic_split is a proto map field: merge entries via update()
        # rather than assignment; an empty/None value leaves it untouched.
        if traffic_split:
            request.traffic_split.update(traffic_split)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.undeploy_model,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here (routes the call by the `endpoint` resource name).
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("endpoint", request.endpoint),
            )),
        )

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Wrap the response in an operation future.
        response = operation_async.from_gapic(
            response,
            self._client._transport.operations_client,
            endpoint_service.UndeployModelResponse,
            metadata_type=endpoint_service.UndeployModelOperationMetadata,
        )

        # Done; return the response.
        return response

    async def __aenter__(self):
        # Support `async with client:` usage; yields the client itself.
        return self

    async def __aexit__(self, exc_type, exc, tb):
        # Close the underlying transport (channel) on context exit.
        await self.transport.close()

# Best-effort version lookup for the user-agent header; falls back to a
# blank ClientInfo when the distribution is not installed (e.g. from source).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-aiplatform",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


__all__ = (
    "EndpointServiceAsyncClient",
)
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.endpoint_service import pagers -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import endpoint -from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint -from google.cloud.aiplatform_v1.types import endpoint_service -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from 
from .transports.base import EndpointServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import EndpointServiceGrpcTransport
from .transports.grpc_asyncio import EndpointServiceGrpcAsyncIOTransport


class EndpointServiceClientMeta(type):
    """Metaclass for the EndpointService client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """
    _transport_registry = OrderedDict()  # type: Dict[str, Type[EndpointServiceTransport]]
    _transport_registry["grpc"] = EndpointServiceGrpcTransport
    _transport_registry["grpc_asyncio"] = EndpointServiceGrpcAsyncIOTransport

    def get_transport_class(cls,
            label: str = None,
            ) -> Type[EndpointServiceTransport]:
        """Returns an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.

        Raises:
            KeyError: If ``label`` is given but not registered.
        """
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]

        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))


class EndpointServiceClient(metaclass=EndpointServiceClientMeta):
    """A service for managing Vertex AI's Endpoints."""

    @staticmethod
    def _get_default_mtls_endpoint(api_endpoint):
        """Converts api endpoint to mTLS endpoint.

        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
        Args:
            api_endpoint (Optional[str]): the api endpoint to convert.
        Returns:
            str: converted mTLS api endpoint.
        """
        if not api_endpoint:
            return api_endpoint

        # NOTE(review): named groups restored — they had been stripped to the
        # invalid form `(?P...)`; the names match the m.groups() unpacking below.
        mtls_endpoint_re = re.compile(
            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
        )

        m = mtls_endpoint_re.match(api_endpoint)
        name, mtls, sandbox, googledomain = m.groups()
        # Already an mTLS endpoint, or not a *.googleapis.com host: leave as-is.
        if mtls or not googledomain:
            return api_endpoint

        if sandbox:
            return api_endpoint.replace(
                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
            )

        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")

    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
        DEFAULT_ENDPOINT
    )

    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        info.

        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            EndpointServiceClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_info(info)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)

    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        file.

        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            EndpointServiceClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_file(
            filename)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)

    from_service_account_json = from_service_account_file

    @property
    def transport(self) -> EndpointServiceTransport:
        """Returns the transport used by the client instance.

        Returns:
            EndpointServiceTransport: The transport used by the client
                instance.
        """
        return self._transport
- """ - return self._transport - - @staticmethod - def endpoint_path(project: str,location: str,endpoint: str,) -> str: - """Returns a fully-qualified endpoint string.""" - return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - - @staticmethod - def parse_endpoint_path(path: str) -> Dict[str,str]: - """Parses a endpoint path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_path(project: str,location: str,model: str,) -> str: - """Returns a fully-qualified model string.""" - return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - - @staticmethod - def parse_model_path(path: str) -> Dict[str,str]: - """Parses a model path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_deployment_monitoring_job_path(project: str,location: str,model_deployment_monitoring_job: str,) -> str: - """Returns a fully-qualified model_deployment_monitoring_job string.""" - return "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format(project=project, location=location, model_deployment_monitoring_job=model_deployment_monitoring_job, ) - - @staticmethod - def parse_model_deployment_monitoring_job_path(path: str) -> Dict[str,str]: - """Parses a model_deployment_monitoring_job path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/modelDeploymentMonitoringJobs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def network_path(project: str,network: str,) -> str: - """Returns a fully-qualified network string.""" - return 
"projects/{project}/global/networks/{network}".format(project=project, network=network, ) - - @staticmethod - def parse_network_path(path: str) -> Dict[str,str]: - """Parses a network path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/global/networks/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m 
else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, EndpointServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the endpoint service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, EndpointServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. 
- (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. 
- if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, EndpointServiceTransport): - # transport is a EndpointServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def create_endpoint(self, - request: Union[endpoint_service.CreateEndpointRequest, dict] = None, - *, - parent: str = None, - endpoint: gca_endpoint.Endpoint = None, - endpoint_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates an Endpoint. 
- - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateEndpointRequest, dict]): - The request object. Request message for - [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1.EndpointService.CreateEndpoint]. - parent (str): - Required. The resource name of the Location to create - the Endpoint in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - endpoint (google.cloud.aiplatform_v1.types.Endpoint): - Required. The Endpoint to create. - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - endpoint_id (str): - Immutable. The ID to use for endpoint, which will become - the final component of the endpoint resource name. If - not provided, Vertex AI will generate a value for this - ID. - - This value should be 1-10 characters, and valid - characters are /[0-9]/. When using HTTP/JSON, this field - is populated based on a query string argument, such as - ``?endpoint_id=12345``. This is the fallback for fields - that are not included in either the URI or the body. - - This corresponds to the ``endpoint_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Endpoint` Models are deployed into it, and afterwards Endpoint is called to obtain - predictions and explanations. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, endpoint, endpoint_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a endpoint_service.CreateEndpointRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, endpoint_service.CreateEndpointRequest): - request = endpoint_service.CreateEndpointRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if endpoint is not None: - request.endpoint = endpoint - if endpoint_id is not None: - request.endpoint_id = endpoint_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_endpoint] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_endpoint.Endpoint, - metadata_type=endpoint_service.CreateEndpointOperationMetadata, - ) - - # Done; return the response. 
- return response - - def get_endpoint(self, - request: Union[endpoint_service.GetEndpointRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> endpoint.Endpoint: - r"""Gets an Endpoint. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetEndpointRequest, dict]): - The request object. Request message for - [EndpointService.GetEndpoint][google.cloud.aiplatform.v1.EndpointService.GetEndpoint] - name (str): - Required. The name of the Endpoint resource. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Endpoint: - Models are deployed into it, and - afterwards Endpoint is called to obtain - predictions and explanations. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a endpoint_service.GetEndpointRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, endpoint_service.GetEndpointRequest): - request = endpoint_service.GetEndpointRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_endpoint] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_endpoints(self, - request: Union[endpoint_service.ListEndpointsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListEndpointsPager: - r"""Lists Endpoints in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListEndpointsRequest, dict]): - The request object. Request message for - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints]. - parent (str): - Required. The resource name of the Location from which - to list the Endpoints. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1.services.endpoint_service.pagers.ListEndpointsPager: - Response message for - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a endpoint_service.ListEndpointsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, endpoint_service.ListEndpointsRequest): - request = endpoint_service.ListEndpointsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_endpoints] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListEndpointsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def update_endpoint(self, - request: Union[endpoint_service.UpdateEndpointRequest, dict] = None, - *, - endpoint: gca_endpoint.Endpoint = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_endpoint.Endpoint: - r"""Updates an Endpoint. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateEndpointRequest, dict]): - The request object. Request message for - [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint]. - endpoint (google.cloud.aiplatform_v1.types.Endpoint): - Required. The Endpoint which replaces - the resource on the server. - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. See - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Endpoint: - Models are deployed into it, and - afterwards Endpoint is called to obtain - predictions and explanations. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([endpoint, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a endpoint_service.UpdateEndpointRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, endpoint_service.UpdateEndpointRequest): - request = endpoint_service.UpdateEndpointRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_endpoint] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint.name", request.endpoint.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_endpoint(self, - request: Union[endpoint_service.DeleteEndpointRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes an Endpoint. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteEndpointRequest, dict]): - The request object. Request message for - [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1.EndpointService.DeleteEndpoint]. - name (str): - Required. The name of the Endpoint resource to be - deleted. 
Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a endpoint_service.DeleteEndpointRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, endpoint_service.DeleteEndpointRequest): - request = endpoint_service.DeleteEndpointRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.delete_endpoint] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def deploy_model(self, - request: Union[endpoint_service.DeployModelRequest, dict] = None, - *, - endpoint: str = None, - deployed_model: gca_endpoint.DeployedModel = None, - traffic_split: Sequence[endpoint_service.DeployModelRequest.TrafficSplitEntry] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deploys a Model into this Endpoint, creating a - DeployedModel within it. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeployModelRequest, dict]): - The request object. Request message for - [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel]. - endpoint (str): - Required. The name of the Endpoint resource into which - to deploy a Model. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_model (google.cloud.aiplatform_v1.types.DeployedModel): - Required. The DeployedModel to be created within the - Endpoint. 
Note that - [Endpoint.traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] - must be updated for the DeployedModel to start receiving - traffic, either as part of this call, or via - [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint]. - - This corresponds to the ``deployed_model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - traffic_split (Sequence[google.cloud.aiplatform_v1.types.DeployModelRequest.TrafficSplitEntry]): - A map from a DeployedModel's ID to the percentage of - this Endpoint's traffic that should be forwarded to that - DeployedModel. - - If this field is non-empty, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] - will be overwritten with it. To refer to the ID of the - just being deployed Model, a "0" should be used, and the - actual ID of the new DeployedModel will be filled in its - place by this method. The traffic percentage values must - add up to 100. - - If this field is empty, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] - is not updated. - - This corresponds to the ``traffic_split`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.DeployModelResponse` - Response message for - [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel]. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, deployed_model, traffic_split]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a endpoint_service.DeployModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, endpoint_service.DeployModelRequest): - request = endpoint_service.DeployModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if deployed_model is not None: - request.deployed_model = deployed_model - if traffic_split is not None: - request.traffic_split = traffic_split - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.deploy_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - endpoint_service.DeployModelResponse, - metadata_type=endpoint_service.DeployModelOperationMetadata, - ) - - # Done; return the response. 
- return response - - def undeploy_model(self, - request: Union[endpoint_service.UndeployModelRequest, dict] = None, - *, - endpoint: str = None, - deployed_model_id: str = None, - traffic_split: Sequence[endpoint_service.UndeployModelRequest.TrafficSplitEntry] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Undeploys a Model from an Endpoint, removing a - DeployedModel from it, and freeing all resources it's - using. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UndeployModelRequest, dict]): - The request object. Request message for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel]. - endpoint (str): - Required. The name of the Endpoint resource from which - to undeploy a Model. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_model_id (str): - Required. The ID of the DeployedModel - to be undeployed from the Endpoint. - - This corresponds to the ``deployed_model_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - traffic_split (Sequence[google.cloud.aiplatform_v1.types.UndeployModelRequest.TrafficSplitEntry]): - If this field is provided, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] - will be overwritten with it. If last DeployedModel is - being undeployed from the Endpoint, the - [Endpoint.traffic_split] will always end up empty when - this call returns. A DeployedModel will be successfully - undeployed only if it doesn't have any traffic assigned - to it when this method executes, or if this field - unassigns any traffic to it. 
- - This corresponds to the ``traffic_split`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.UndeployModelResponse` - Response message for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, deployed_model_id, traffic_split]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a endpoint_service.UndeployModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, endpoint_service.UndeployModelRequest): - request = endpoint_service.UndeployModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if deployed_model_id is not None: - request.deployed_model_id = deployed_model_id - if traffic_split is not None: - request.traffic_split = traffic_split - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.undeploy_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - endpoint_service.UndeployModelResponse, - metadata_type=endpoint_service.UndeployModelOperationMetadata, - ) - - # Done; return the response. - return response - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! - """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "EndpointServiceClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py deleted file mode 100644 index 513be943c2..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/pagers.py +++ /dev/null @@ -1,141 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.aiplatform_v1.types import endpoint -from google.cloud.aiplatform_v1.types import endpoint_service - - -class ListEndpointsPager: - """A pager for iterating through ``list_endpoints`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListEndpointsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``endpoints`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListEndpoints`` requests and continue to iterate - through the ``endpoints`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListEndpointsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., endpoint_service.ListEndpointsResponse], - request: endpoint_service.ListEndpointsRequest, - response: endpoint_service.ListEndpointsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListEndpointsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListEndpointsResponse): - The initial response object. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = endpoint_service.ListEndpointsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[endpoint_service.ListEndpointsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[endpoint.Endpoint]: - for page in self.pages: - yield from page.endpoints - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListEndpointsAsyncPager: - """A pager for iterating through ``list_endpoints`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListEndpointsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``endpoints`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListEndpoints`` requests and continue to iterate - through the ``endpoints`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListEndpointsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[endpoint_service.ListEndpointsResponse]], - request: endpoint_service.ListEndpointsRequest, - response: endpoint_service.ListEndpointsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. 
- request (google.cloud.aiplatform_v1.types.ListEndpointsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListEndpointsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = endpoint_service.ListEndpointsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[endpoint_service.ListEndpointsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[endpoint.Endpoint]: - async def async_generator(): - async for page in self.pages: - for response in page.endpoints: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/__init__.py deleted file mode 100644 index a062fc074c..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import EndpointServiceTransport -from .grpc import EndpointServiceGrpcTransport -from .grpc_asyncio import EndpointServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[EndpointServiceTransport]] -_transport_registry['grpc'] = EndpointServiceGrpcTransport -_transport_registry['grpc_asyncio'] = EndpointServiceGrpcAsyncIOTransport - -__all__ = ( - 'EndpointServiceTransport', - 'EndpointServiceGrpcTransport', - 'EndpointServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py deleted file mode 100644 index 6b35cbed12..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/base.py +++ /dev/null @@ -1,239 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1.types import endpoint -from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint -from google.cloud.aiplatform_v1.types import endpoint_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class EndpointServiceTransport(abc.ABC): - """Abstract transport class for EndpointService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. 
- if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.create_endpoint: gapic_v1.method.wrap_method( - self.create_endpoint, - default_timeout=None, - client_info=client_info, - ), - self.get_endpoint: gapic_v1.method.wrap_method( - self.get_endpoint, - default_timeout=None, - client_info=client_info, - ), - self.list_endpoints: gapic_v1.method.wrap_method( - self.list_endpoints, - default_timeout=None, - client_info=client_info, - ), - self.update_endpoint: gapic_v1.method.wrap_method( - self.update_endpoint, - default_timeout=None, - client_info=client_info, - ), - self.delete_endpoint: gapic_v1.method.wrap_method( - self.delete_endpoint, - default_timeout=None, - client_info=client_info, - ), - self.deploy_model: gapic_v1.method.wrap_method( - self.deploy_model, - default_timeout=None, - client_info=client_info, - ), - self.undeploy_model: gapic_v1.method.wrap_method( - self.undeploy_model, - default_timeout=None, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! 
- """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_endpoint(self) -> Callable[ - [endpoint_service.CreateEndpointRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_endpoint(self) -> Callable[ - [endpoint_service.GetEndpointRequest], - Union[ - endpoint.Endpoint, - Awaitable[endpoint.Endpoint] - ]]: - raise NotImplementedError() - - @property - def list_endpoints(self) -> Callable[ - [endpoint_service.ListEndpointsRequest], - Union[ - endpoint_service.ListEndpointsResponse, - Awaitable[endpoint_service.ListEndpointsResponse] - ]]: - raise NotImplementedError() - - @property - def update_endpoint(self) -> Callable[ - [endpoint_service.UpdateEndpointRequest], - Union[ - gca_endpoint.Endpoint, - Awaitable[gca_endpoint.Endpoint] - ]]: - raise NotImplementedError() - - @property - def delete_endpoint(self) -> Callable[ - [endpoint_service.DeleteEndpointRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def deploy_model(self) -> Callable[ - [endpoint_service.DeployModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def undeploy_model(self) -> Callable[ - [endpoint_service.UndeployModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'EndpointServiceTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py deleted file mode 100644 index 492f1a42fa..0000000000 --- 
a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc.py +++ /dev/null @@ -1,434 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1.types import endpoint -from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint -from google.cloud.aiplatform_v1.types import endpoint_service -from google.longrunning import operations_pb2 # type: ignore -from .base import EndpointServiceTransport, DEFAULT_CLIENT_INFO - - -class EndpointServiceGrpcTransport(EndpointServiceTransport): - """gRPC backend transport for EndpointService. - - A service for managing Vertex AI's Endpoints. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. 
- ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_endpoint(self) -> Callable[ - [endpoint_service.CreateEndpointRequest], - operations_pb2.Operation]: - r"""Return a callable for the create endpoint method over gRPC. - - Creates an Endpoint. 
- - Returns: - Callable[[~.CreateEndpointRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_endpoint' not in self._stubs: - self._stubs['create_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/CreateEndpoint', - request_serializer=endpoint_service.CreateEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_endpoint'] - - @property - def get_endpoint(self) -> Callable[ - [endpoint_service.GetEndpointRequest], - endpoint.Endpoint]: - r"""Return a callable for the get endpoint method over gRPC. - - Gets an Endpoint. - - Returns: - Callable[[~.GetEndpointRequest], - ~.Endpoint]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_endpoint' not in self._stubs: - self._stubs['get_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/GetEndpoint', - request_serializer=endpoint_service.GetEndpointRequest.serialize, - response_deserializer=endpoint.Endpoint.deserialize, - ) - return self._stubs['get_endpoint'] - - @property - def list_endpoints(self) -> Callable[ - [endpoint_service.ListEndpointsRequest], - endpoint_service.ListEndpointsResponse]: - r"""Return a callable for the list endpoints method over gRPC. - - Lists Endpoints in a Location. - - Returns: - Callable[[~.ListEndpointsRequest], - ~.ListEndpointsResponse]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_endpoints' not in self._stubs: - self._stubs['list_endpoints'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/ListEndpoints', - request_serializer=endpoint_service.ListEndpointsRequest.serialize, - response_deserializer=endpoint_service.ListEndpointsResponse.deserialize, - ) - return self._stubs['list_endpoints'] - - @property - def update_endpoint(self) -> Callable[ - [endpoint_service.UpdateEndpointRequest], - gca_endpoint.Endpoint]: - r"""Return a callable for the update endpoint method over gRPC. - - Updates an Endpoint. - - Returns: - Callable[[~.UpdateEndpointRequest], - ~.Endpoint]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_endpoint' not in self._stubs: - self._stubs['update_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/UpdateEndpoint', - request_serializer=endpoint_service.UpdateEndpointRequest.serialize, - response_deserializer=gca_endpoint.Endpoint.deserialize, - ) - return self._stubs['update_endpoint'] - - @property - def delete_endpoint(self) -> Callable[ - [endpoint_service.DeleteEndpointRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete endpoint method over gRPC. - - Deletes an Endpoint. - - Returns: - Callable[[~.DeleteEndpointRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_endpoint' not in self._stubs: - self._stubs['delete_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/DeleteEndpoint', - request_serializer=endpoint_service.DeleteEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_endpoint'] - - @property - def deploy_model(self) -> Callable[ - [endpoint_service.DeployModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the deploy model method over gRPC. - - Deploys a Model into this Endpoint, creating a - DeployedModel within it. - - Returns: - Callable[[~.DeployModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'deploy_model' not in self._stubs: - self._stubs['deploy_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/DeployModel', - request_serializer=endpoint_service.DeployModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['deploy_model'] - - @property - def undeploy_model(self) -> Callable[ - [endpoint_service.UndeployModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the undeploy model method over gRPC. - - Undeploys a Model from an Endpoint, removing a - DeployedModel from it, and freeing all resources it's - using. - - Returns: - Callable[[~.UndeployModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'undeploy_model' not in self._stubs: - self._stubs['undeploy_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/UndeployModel', - request_serializer=endpoint_service.UndeployModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['undeploy_model'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'EndpointServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py deleted file mode 100644 index a079f7a528..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/endpoint_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,438 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1.types import endpoint -from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint -from google.cloud.aiplatform_v1.types import endpoint_service -from google.longrunning import operations_pb2 # type: ignore -from .base import EndpointServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import EndpointServiceGrpcTransport - - -class EndpointServiceGrpcAsyncIOTransport(EndpointServiceTransport): - """gRPC AsyncIO backend transport for EndpointService. - - A service for managing Vertex AI's Endpoints. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. 
If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. 
- This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. 
- - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. 
- if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_endpoint(self) -> Callable[ - [endpoint_service.CreateEndpointRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create endpoint method over gRPC. - - Creates an Endpoint. - - Returns: - Callable[[~.CreateEndpointRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_endpoint' not in self._stubs: - self._stubs['create_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/CreateEndpoint', - request_serializer=endpoint_service.CreateEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_endpoint'] - - @property - def get_endpoint(self) -> Callable[ - [endpoint_service.GetEndpointRequest], - Awaitable[endpoint.Endpoint]]: - r"""Return a callable for the get endpoint method over gRPC. - - Gets an Endpoint. - - Returns: - Callable[[~.GetEndpointRequest], - Awaitable[~.Endpoint]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_endpoint' not in self._stubs: - self._stubs['get_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/GetEndpoint', - request_serializer=endpoint_service.GetEndpointRequest.serialize, - response_deserializer=endpoint.Endpoint.deserialize, - ) - return self._stubs['get_endpoint'] - - @property - def list_endpoints(self) -> Callable[ - [endpoint_service.ListEndpointsRequest], - Awaitable[endpoint_service.ListEndpointsResponse]]: - r"""Return a callable for the list endpoints method over gRPC. - - Lists Endpoints in a Location. - - Returns: - Callable[[~.ListEndpointsRequest], - Awaitable[~.ListEndpointsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_endpoints' not in self._stubs: - self._stubs['list_endpoints'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/ListEndpoints', - request_serializer=endpoint_service.ListEndpointsRequest.serialize, - response_deserializer=endpoint_service.ListEndpointsResponse.deserialize, - ) - return self._stubs['list_endpoints'] - - @property - def update_endpoint(self) -> Callable[ - [endpoint_service.UpdateEndpointRequest], - Awaitable[gca_endpoint.Endpoint]]: - r"""Return a callable for the update endpoint method over gRPC. - - Updates an Endpoint. - - Returns: - Callable[[~.UpdateEndpointRequest], - Awaitable[~.Endpoint]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_endpoint' not in self._stubs: - self._stubs['update_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/UpdateEndpoint', - request_serializer=endpoint_service.UpdateEndpointRequest.serialize, - response_deserializer=gca_endpoint.Endpoint.deserialize, - ) - return self._stubs['update_endpoint'] - - @property - def delete_endpoint(self) -> Callable[ - [endpoint_service.DeleteEndpointRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete endpoint method over gRPC. - - Deletes an Endpoint. - - Returns: - Callable[[~.DeleteEndpointRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_endpoint' not in self._stubs: - self._stubs['delete_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/DeleteEndpoint', - request_serializer=endpoint_service.DeleteEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_endpoint'] - - @property - def deploy_model(self) -> Callable[ - [endpoint_service.DeployModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the deploy model method over gRPC. - - Deploys a Model into this Endpoint, creating a - DeployedModel within it. - - Returns: - Callable[[~.DeployModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'deploy_model' not in self._stubs: - self._stubs['deploy_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/DeployModel', - request_serializer=endpoint_service.DeployModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['deploy_model'] - - @property - def undeploy_model(self) -> Callable[ - [endpoint_service.UndeployModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the undeploy model method over gRPC. - - Undeploys a Model from an Endpoint, removing a - DeployedModel from it, and freeing all resources it's - using. - - Returns: - Callable[[~.UndeployModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'undeploy_model' not in self._stubs: - self._stubs['undeploy_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.EndpointService/UndeployModel', - request_serializer=endpoint_service.UndeployModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['undeploy_model'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'EndpointServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/__init__.py deleted file mode 100644 index e009ebaec2..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import FeaturestoreOnlineServingServiceClient -from .async_client import FeaturestoreOnlineServingServiceAsyncClient - -__all__ = ( - 'FeaturestoreOnlineServingServiceClient', - 'FeaturestoreOnlineServingServiceAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py deleted file mode 100644 index ed5a171d25..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/async_client.py +++ /dev/null @@ -1,332 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, AsyncIterable, Awaitable, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.cloud.aiplatform_v1.types import featurestore_online_service -from .transports.base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import FeaturestoreOnlineServingServiceGrpcAsyncIOTransport -from .client import FeaturestoreOnlineServingServiceClient - - -class FeaturestoreOnlineServingServiceAsyncClient: - """A service for serving online feature values.""" - - _client: FeaturestoreOnlineServingServiceClient - - DEFAULT_ENDPOINT = FeaturestoreOnlineServingServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = FeaturestoreOnlineServingServiceClient.DEFAULT_MTLS_ENDPOINT - - entity_type_path = staticmethod(FeaturestoreOnlineServingServiceClient.entity_type_path) - parse_entity_type_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_entity_type_path) - common_billing_account_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_folder_path) - common_organization_path = 
staticmethod(FeaturestoreOnlineServingServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_organization_path) - common_project_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_project_path) - parse_common_project_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_project_path) - common_location_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_location_path) - parse_common_location_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - FeaturestoreOnlineServingServiceAsyncClient: The constructed client. - """ - return FeaturestoreOnlineServingServiceClient.from_service_account_info.__func__(FeaturestoreOnlineServingServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - FeaturestoreOnlineServingServiceAsyncClient: The constructed client. 
- """ - return FeaturestoreOnlineServingServiceClient.from_service_account_file.__func__(FeaturestoreOnlineServingServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> FeaturestoreOnlineServingServiceTransport: - """Returns the transport used by the client instance. - - Returns: - FeaturestoreOnlineServingServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(FeaturestoreOnlineServingServiceClient).get_transport_class, type(FeaturestoreOnlineServingServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, FeaturestoreOnlineServingServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the featurestore online serving service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.FeaturestoreOnlineServingServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = FeaturestoreOnlineServingServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def read_feature_values(self, - request: Union[featurestore_online_service.ReadFeatureValuesRequest, dict] = None, - *, - entity_type: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> featurestore_online_service.ReadFeatureValuesResponse: - r"""Reads Feature values of a specific entity of an - EntityType. For reading feature values of multiple - entities of an EntityType, please use - StreamingReadFeatureValues. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ReadFeatureValuesRequest, dict]): - The request object. Request message for - [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues]. - entity_type (:class:`str`): - Required. The resource name of the EntityType for the - entity being read. 
Value format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. - For example, for a machine learning model predicting - user clicks on a website, an EntityType ID could be - ``user``. - - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse: - Response message for - [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_online_service.ReadFeatureValuesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if entity_type is not None: - request.entity_type = entity_type - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.read_feature_values, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("entity_type", request.entity_type), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def streaming_read_feature_values(self, - request: Union[featurestore_online_service.StreamingReadFeatureValuesRequest, dict] = None, - *, - entity_type: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> Awaitable[AsyncIterable[featurestore_online_service.ReadFeatureValuesResponse]]: - r"""Reads Feature values for multiple entities. Depending - on their size, data for different entities may be broken - up across multiple responses. - - Args: - request (Union[google.cloud.aiplatform_v1.types.StreamingReadFeatureValuesRequest, dict]): - The request object. Request message for - [FeaturestoreOnlineServingService.StreamingFeatureValuesRead][]. - entity_type (:class:`str`): - Required. The resource name of the entities' type. Value - format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. - For example, for a machine learning model predicting - user clicks on a website, an EntityType ID could be - ``user``. - - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - AsyncIterable[google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse]: - Response message for - [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues]. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_online_service.StreamingReadFeatureValuesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if entity_type is not None: - request.entity_type = entity_type - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.streaming_read_feature_values, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("entity_type", request.entity_type), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "FeaturestoreOnlineServingServiceAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py deleted file mode 100644 index 84cbb49412..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/client.py +++ /dev/null @@ -1,530 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Iterable, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.cloud.aiplatform_v1.types import featurestore_online_service -from .transports.base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import FeaturestoreOnlineServingServiceGrpcTransport -from .transports.grpc_asyncio import FeaturestoreOnlineServingServiceGrpcAsyncIOTransport - - -class FeaturestoreOnlineServingServiceClientMeta(type): - """Metaclass for the FeaturestoreOnlineServingService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[FeaturestoreOnlineServingServiceTransport]] - _transport_registry["grpc"] = FeaturestoreOnlineServingServiceGrpcTransport - _transport_registry["grpc_asyncio"] = FeaturestoreOnlineServingServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[FeaturestoreOnlineServingServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. 
If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class FeaturestoreOnlineServingServiceClient(metaclass=FeaturestoreOnlineServingServiceClientMeta): - """A service for serving online feature values.""" - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - FeaturestoreOnlineServingServiceClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - FeaturestoreOnlineServingServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> FeaturestoreOnlineServingServiceTransport: - """Returns the transport used by the client instance. - - Returns: - FeaturestoreOnlineServingServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def entity_type_path(project: str,location: str,featurestore: str,entity_type: str,) -> str: - """Returns a fully-qualified entity_type string.""" - return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, ) - - @staticmethod - def parse_entity_type_path(path: str) -> Dict[str,str]: - """Parses a entity_type path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)/entityTypes/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def 
common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, FeaturestoreOnlineServingServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the featurestore online serving service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, FeaturestoreOnlineServingServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. 
- if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, FeaturestoreOnlineServingServiceTransport): - # transport is a FeaturestoreOnlineServingServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def read_feature_values(self, - request: Union[featurestore_online_service.ReadFeatureValuesRequest, dict] = None, - *, - entity_type: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> featurestore_online_service.ReadFeatureValuesResponse: - r"""Reads Feature values of a specific entity of an - EntityType. For reading feature values of multiple - entities of an EntityType, please use - StreamingReadFeatureValues. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ReadFeatureValuesRequest, dict]): - The request object. Request message for - [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues]. - entity_type (str): - Required. The resource name of the EntityType for the - entity being read. Value format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. - For example, for a machine learning model predicting - user clicks on a website, an EntityType ID could be - ``user``. - - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse: - Response message for - [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_online_service.ReadFeatureValuesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_online_service.ReadFeatureValuesRequest): - request = featurestore_online_service.ReadFeatureValuesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if entity_type is not None: - request.entity_type = entity_type - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.read_feature_values] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("entity_type", request.entity_type), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def streaming_read_feature_values(self, - request: Union[featurestore_online_service.StreamingReadFeatureValuesRequest, dict] = None, - *, - entity_type: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> Iterable[featurestore_online_service.ReadFeatureValuesResponse]: - r"""Reads Feature values for multiple entities. Depending - on their size, data for different entities may be broken - up across multiple responses. - - Args: - request (Union[google.cloud.aiplatform_v1.types.StreamingReadFeatureValuesRequest, dict]): - The request object. Request message for - [FeaturestoreOnlineServingService.StreamingFeatureValuesRead][]. - entity_type (str): - Required. The resource name of the entities' type. Value - format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. - For example, for a machine learning model predicting - user clicks on a website, an EntityType ID could be - ``user``. - - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - Iterable[google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse]: - Response message for - [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([entity_type]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_online_service.StreamingReadFeatureValuesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_online_service.StreamingReadFeatureValuesRequest): - request = featurestore_online_service.StreamingReadFeatureValuesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if entity_type is not None: - request.entity_type = entity_type - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.streaming_read_feature_values] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("entity_type", request.entity_type), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! 
- """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "FeaturestoreOnlineServingServiceClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/__init__.py deleted file mode 100644 index d1abcd0c43..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import FeaturestoreOnlineServingServiceTransport -from .grpc import FeaturestoreOnlineServingServiceGrpcTransport -from .grpc_asyncio import FeaturestoreOnlineServingServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. 
-_transport_registry = OrderedDict() # type: Dict[str, Type[FeaturestoreOnlineServingServiceTransport]] -_transport_registry['grpc'] = FeaturestoreOnlineServingServiceGrpcTransport -_transport_registry['grpc_asyncio'] = FeaturestoreOnlineServingServiceGrpcAsyncIOTransport - -__all__ = ( - 'FeaturestoreOnlineServingServiceTransport', - 'FeaturestoreOnlineServingServiceGrpcTransport', - 'FeaturestoreOnlineServingServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py deleted file mode 100644 index b92bd622eb..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/base.py +++ /dev/null @@ -1,160 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1.types import featurestore_online_service - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class FeaturestoreOnlineServingServiceTransport(abc.ABC): - """Abstract transport class for FeaturestoreOnlineServingService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. 
- scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
- self._wrapped_methods = { - self.read_feature_values: gapic_v1.method.wrap_method( - self.read_feature_values, - default_timeout=None, - client_info=client_info, - ), - self.streaming_read_feature_values: gapic_v1.method.wrap_method( - self.streaming_read_feature_values, - default_timeout=None, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! - """ - raise NotImplementedError() - - @property - def read_feature_values(self) -> Callable[ - [featurestore_online_service.ReadFeatureValuesRequest], - Union[ - featurestore_online_service.ReadFeatureValuesResponse, - Awaitable[featurestore_online_service.ReadFeatureValuesResponse] - ]]: - raise NotImplementedError() - - @property - def streaming_read_feature_values(self) -> Callable[ - [featurestore_online_service.StreamingReadFeatureValuesRequest], - Union[ - featurestore_online_service.ReadFeatureValuesResponse, - Awaitable[featurestore_online_service.ReadFeatureValuesResponse] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'FeaturestoreOnlineServingServiceTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc.py deleted file mode 100644 index d251cb88e0..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc.py +++ /dev/null @@ -1,285 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1.types import featurestore_online_service -from .base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO - - -class FeaturestoreOnlineServingServiceGrpcTransport(FeaturestoreOnlineServingServiceTransport): - """gRPC backend transport for FeaturestoreOnlineServingService. - - A service for serving online feature values. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. 
- ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def read_feature_values(self) -> Callable[ - [featurestore_online_service.ReadFeatureValuesRequest], - featurestore_online_service.ReadFeatureValuesResponse]: - r"""Return a callable for the read feature values method over gRPC. - - Reads Feature values of a specific entity of an - EntityType. For reading feature values of multiple - entities of an EntityType, please use - StreamingReadFeatureValues. - - Returns: - Callable[[~.ReadFeatureValuesRequest], - ~.ReadFeatureValuesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'read_feature_values' not in self._stubs: - self._stubs['read_feature_values'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreOnlineServingService/ReadFeatureValues', - request_serializer=featurestore_online_service.ReadFeatureValuesRequest.serialize, - response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, - ) - return self._stubs['read_feature_values'] - - @property - def streaming_read_feature_values(self) -> Callable[ - [featurestore_online_service.StreamingReadFeatureValuesRequest], - featurestore_online_service.ReadFeatureValuesResponse]: - r"""Return a callable for the streaming read feature values method over gRPC. - - Reads Feature values for multiple entities. Depending - on their size, data for different entities may be broken - up across multiple responses. - - Returns: - Callable[[~.StreamingReadFeatureValuesRequest], - ~.ReadFeatureValuesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'streaming_read_feature_values' not in self._stubs: - self._stubs['streaming_read_feature_values'] = self.grpc_channel.unary_stream( - '/google.cloud.aiplatform.v1.FeaturestoreOnlineServingService/StreamingReadFeatureValues', - request_serializer=featurestore_online_service.StreamingReadFeatureValuesRequest.serialize, - response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, - ) - return self._stubs['streaming_read_feature_values'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'FeaturestoreOnlineServingServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc_asyncio.py deleted file mode 100644 index 281635f06f..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,289 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1.types import featurestore_online_service -from .base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import FeaturestoreOnlineServingServiceGrpcTransport - - -class FeaturestoreOnlineServingServiceGrpcAsyncIOTransport(FeaturestoreOnlineServingServiceTransport): - """gRPC AsyncIO backend transport for FeaturestoreOnlineServingService. - - A service for serving online feature values. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. 
- google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. 
This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def read_feature_values(self) -> Callable[ - [featurestore_online_service.ReadFeatureValuesRequest], - Awaitable[featurestore_online_service.ReadFeatureValuesResponse]]: - r"""Return a callable for the read feature values method over gRPC. - - Reads Feature values of a specific entity of an - EntityType. For reading feature values of multiple - entities of an EntityType, please use - StreamingReadFeatureValues. - - Returns: - Callable[[~.ReadFeatureValuesRequest], - Awaitable[~.ReadFeatureValuesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'read_feature_values' not in self._stubs: - self._stubs['read_feature_values'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreOnlineServingService/ReadFeatureValues', - request_serializer=featurestore_online_service.ReadFeatureValuesRequest.serialize, - response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, - ) - return self._stubs['read_feature_values'] - - @property - def streaming_read_feature_values(self) -> Callable[ - [featurestore_online_service.StreamingReadFeatureValuesRequest], - Awaitable[featurestore_online_service.ReadFeatureValuesResponse]]: - r"""Return a callable for the streaming read feature values method over gRPC. - - Reads Feature values for multiple entities. 
Depending - on their size, data for different entities may be broken - up across multiple responses. - - Returns: - Callable[[~.StreamingReadFeatureValuesRequest], - Awaitable[~.ReadFeatureValuesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'streaming_read_feature_values' not in self._stubs: - self._stubs['streaming_read_feature_values'] = self.grpc_channel.unary_stream( - '/google.cloud.aiplatform.v1.FeaturestoreOnlineServingService/StreamingReadFeatureValues', - request_serializer=featurestore_online_service.StreamingReadFeatureValuesRequest.serialize, - response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, - ) - return self._stubs['streaming_read_feature_values'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'FeaturestoreOnlineServingServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/__init__.py deleted file mode 100644 index 81716ce8fe..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import FeaturestoreServiceClient -from .async_client import FeaturestoreServiceAsyncClient - -__all__ = ( - 'FeaturestoreServiceClient', - 'FeaturestoreServiceAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py deleted file mode 100644 index 6b660cd40f..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/async_client.py +++ /dev/null @@ -1,2222 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.featurestore_service import pagers -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import entity_type -from google.cloud.aiplatform_v1.types import entity_type as gca_entity_type -from google.cloud.aiplatform_v1.types import feature -from google.cloud.aiplatform_v1.types import feature as gca_feature -from google.cloud.aiplatform_v1.types import featurestore -from google.cloud.aiplatform_v1.types import featurestore as gca_featurestore -from google.cloud.aiplatform_v1.types import featurestore_service -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport -from .client import FeaturestoreServiceClient - - -class FeaturestoreServiceAsyncClient: - """The service that handles CRUD and List for resources for - Featurestore. 
- """ - - _client: FeaturestoreServiceClient - - DEFAULT_ENDPOINT = FeaturestoreServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = FeaturestoreServiceClient.DEFAULT_MTLS_ENDPOINT - - entity_type_path = staticmethod(FeaturestoreServiceClient.entity_type_path) - parse_entity_type_path = staticmethod(FeaturestoreServiceClient.parse_entity_type_path) - feature_path = staticmethod(FeaturestoreServiceClient.feature_path) - parse_feature_path = staticmethod(FeaturestoreServiceClient.parse_feature_path) - featurestore_path = staticmethod(FeaturestoreServiceClient.featurestore_path) - parse_featurestore_path = staticmethod(FeaturestoreServiceClient.parse_featurestore_path) - common_billing_account_path = staticmethod(FeaturestoreServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(FeaturestoreServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(FeaturestoreServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(FeaturestoreServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(FeaturestoreServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(FeaturestoreServiceClient.parse_common_organization_path) - common_project_path = staticmethod(FeaturestoreServiceClient.common_project_path) - parse_common_project_path = staticmethod(FeaturestoreServiceClient.parse_common_project_path) - common_location_path = staticmethod(FeaturestoreServiceClient.common_location_path) - parse_common_location_path = staticmethod(FeaturestoreServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. 
- - Returns: - FeaturestoreServiceAsyncClient: The constructed client. - """ - return FeaturestoreServiceClient.from_service_account_info.__func__(FeaturestoreServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - FeaturestoreServiceAsyncClient: The constructed client. - """ - return FeaturestoreServiceClient.from_service_account_file.__func__(FeaturestoreServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> FeaturestoreServiceTransport: - """Returns the transport used by the client instance. - - Returns: - FeaturestoreServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(FeaturestoreServiceClient).get_transport_class, type(FeaturestoreServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, FeaturestoreServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the featurestore service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.FeaturestoreServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. 
- client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = FeaturestoreServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_featurestore(self, - request: Union[featurestore_service.CreateFeaturestoreRequest, dict] = None, - *, - parent: str = None, - featurestore: gca_featurestore.Featurestore = None, - featurestore_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a new Featurestore in a given project and - location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateFeaturestoreRequest, dict]): - The request object. Request message for - [FeaturestoreService.CreateFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.CreateFeaturestore]. 
- parent (:class:`str`): - Required. The resource name of the Location to create - Featurestores. Format: - ``projects/{project}/locations/{location}'`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - featurestore (:class:`google.cloud.aiplatform_v1.types.Featurestore`): - Required. The Featurestore to create. - This corresponds to the ``featurestore`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - featurestore_id (:class:`str`): - Required. The ID to use for this Featurestore, which - will become the final component of the Featurestore's - resource name. - - This value may be up to 60 characters, and valid - characters are ``[a-z0-9_]``. The first character cannot - be a number. - - The value must be unique within the project and - location. - - This corresponds to the ``featurestore_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing, - storing, and serving ML features. The Featurestore is - a top-level container for your features and their - values. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, featurestore, featurestore_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.CreateFeaturestoreRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if featurestore is not None: - request.featurestore = featurestore - if featurestore_id is not None: - request.featurestore_id = featurestore_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_featurestore, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_featurestore.Featurestore, - metadata_type=featurestore_service.CreateFeaturestoreOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_featurestore(self, - request: Union[featurestore_service.GetFeaturestoreRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> featurestore.Featurestore: - r"""Gets details of a single Featurestore. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetFeaturestoreRequest, dict]): - The request object. 
Request message for - [FeaturestoreService.GetFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.GetFeaturestore]. - name (:class:`str`): - Required. The name of the - Featurestore resource. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Featurestore: - Vertex AI Feature Store provides a - centralized repository for organizing, - storing, and serving ML features. The - Featurestore is a top-level container - for your features and their values. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.GetFeaturestoreRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_featurestore, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_featurestores(self, - request: Union[featurestore_service.ListFeaturestoresRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListFeaturestoresAsyncPager: - r"""Lists Featurestores in a given project and location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListFeaturestoresRequest, dict]): - The request object. Request message for - [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores]. - parent (:class:`str`): - Required. The resource name of the Location to list - Featurestores. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListFeaturestoresAsyncPager: - Response message for - [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.ListFeaturestoresRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_featurestores, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListFeaturestoresAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_featurestore(self, - request: Union[featurestore_service.UpdateFeaturestoreRequest, dict] = None, - *, - featurestore: gca_featurestore.Featurestore = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Updates the parameters of a single Featurestore. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateFeaturestoreRequest, dict]): - The request object. Request message for - [FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeaturestore]. 
- featurestore (:class:`google.cloud.aiplatform_v1.types.Featurestore`): - Required. The Featurestore's ``name`` field is used to - identify the Featurestore to be updated. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``featurestore`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Field mask is used to specify the fields to be - overwritten in the Featurestore resource by the update. - The fields specified in the update_mask are relative to - the resource, not the full request. A field will be - overwritten if it is in the mask. If the user does not - provide a mask then only the non-empty fields present in - the request will be overwritten. Set the update_mask to - ``*`` to override all fields. - - Updatable fields: - - - ``labels`` - - ``online_serving_config.fixed_node_count`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing, - storing, and serving ML features. The Featurestore is - a top-level container for your features and their - values. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([featurestore, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.UpdateFeaturestoreRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if featurestore is not None: - request.featurestore = featurestore - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_featurestore, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("featurestore.name", request.featurestore.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_featurestore.Featurestore, - metadata_type=featurestore_service.UpdateFeaturestoreOperationMetadata, - ) - - # Done; return the response. - return response - - async def delete_featurestore(self, - request: Union[featurestore_service.DeleteFeaturestoreRequest, dict] = None, - *, - name: str = None, - force: bool = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a single Featurestore. The Featurestore must not contain - any EntityTypes or ``force`` must be set to true for the request - to succeed. 
- - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteFeaturestoreRequest, dict]): - The request object. Request message for - [FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeaturestore]. - name (:class:`str`): - Required. The name of the Featurestore to be deleted. - Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - force (:class:`bool`): - If set to true, any EntityTypes and - Features for this Featurestore will also - be deleted. (Otherwise, the request will - only work if the Featurestore has no - EntityTypes.) - - This corresponds to the ``force`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name, force]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.DeleteFeaturestoreRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if force is not None: - request.force = force - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_featurestore, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def create_entity_type(self, - request: Union[featurestore_service.CreateEntityTypeRequest, dict] = None, - *, - parent: str = None, - entity_type: gca_entity_type.EntityType = None, - entity_type_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a new EntityType in a given Featurestore. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateEntityTypeRequest, dict]): - The request object. Request message for - [FeaturestoreService.CreateEntityType][google.cloud.aiplatform.v1.FeaturestoreService.CreateEntityType]. 
- parent (:class:`str`): - Required. The resource name of the Featurestore to - create EntityTypes. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - entity_type (:class:`google.cloud.aiplatform_v1.types.EntityType`): - The EntityType to create. - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - entity_type_id (:class:`str`): - Required. The ID to use for the EntityType, which will - become the final component of the EntityType's resource - name. - - This value may be up to 60 characters, and valid - characters are ``[a-z0-9_]``. The first character cannot - be a number. - - The value must be unique within a featurestore. - - This corresponds to the ``entity_type_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.EntityType` An entity type is a type of object in a system that needs to be modeled and - have stored information about. For example, driver is - an entity type, and driver0 is an instance of an - entity type driver. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, entity_type, entity_type_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.CreateEntityTypeRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if entity_type is not None: - request.entity_type = entity_type - if entity_type_id is not None: - request.entity_type_id = entity_type_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_entity_type, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_entity_type.EntityType, - metadata_type=featurestore_service.CreateEntityTypeOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_entity_type(self, - request: Union[featurestore_service.GetEntityTypeRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> entity_type.EntityType: - r"""Gets details of a single EntityType. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetEntityTypeRequest, dict]): - The request object. 
Request message for - [FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1.FeaturestoreService.GetEntityType]. - name (:class:`str`): - Required. The name of the EntityType resource. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.EntityType: - An entity type is a type of object in - a system that needs to be modeled and - have stored information about. For - example, driver is an entity type, and - driver0 is an instance of an entity type - driver. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.GetEntityTypeRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_entity_type, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_entity_types(self, - request: Union[featurestore_service.ListEntityTypesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListEntityTypesAsyncPager: - r"""Lists EntityTypes in a given Featurestore. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListEntityTypesRequest, dict]): - The request object. Request message for - [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes]. - parent (:class:`str`): - Required. The resource name of the Featurestore to list - EntityTypes. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListEntityTypesAsyncPager: - Response message for - [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.ListEntityTypesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_entity_types, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListEntityTypesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_entity_type(self, - request: Union[featurestore_service.UpdateEntityTypeRequest, dict] = None, - *, - entity_type: gca_entity_type.EntityType = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_entity_type.EntityType: - r"""Updates the parameters of a single EntityType. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateEntityTypeRequest, dict]): - The request object. Request message for - [FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1.FeaturestoreService.UpdateEntityType]. 
- entity_type (:class:`google.cloud.aiplatform_v1.types.EntityType`): - Required. The EntityType's ``name`` field is used to - identify the EntityType to be updated. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Field mask is used to specify the fields to be - overwritten in the EntityType resource by the update. - The fields specified in the update_mask are relative to - the resource, not the full request. A field will be - overwritten if it is in the mask. If the user does not - provide a mask then only the non-empty fields present in - the request will be overwritten. Set the update_mask to - ``*`` to override all fields. - - Updatable fields: - - - ``description`` - - ``labels`` - - ``monitoring_config.snapshot_analysis.disabled`` - - ``monitoring_config.snapshot_analysis.monitoring_interval`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.EntityType: - An entity type is a type of object in - a system that needs to be modeled and - have stored information about. For - example, driver is an entity type, and - driver0 is an instance of an entity type - driver. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([entity_type, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.UpdateEntityTypeRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if entity_type is not None: - request.entity_type = entity_type - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_entity_type, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("entity_type.name", request.entity_type.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_entity_type(self, - request: Union[featurestore_service.DeleteEntityTypeRequest, dict] = None, - *, - name: str = None, - force: bool = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a single EntityType. The EntityType must not have any - Features or ``force`` must be set to true for the request to - succeed. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteEntityTypeRequest, dict]): - The request object. Request message for - [FeaturestoreService.DeleteEntityTypes][]. - name (:class:`str`): - Required. The name of the EntityType to be deleted. 
- Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - force (:class:`bool`): - If set to true, any Features for this - EntityType will also be deleted. - (Otherwise, the request will only work - if the EntityType has no Features.) - - This corresponds to the ``force`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, force]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.DeleteEntityTypeRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - if force is not None: - request.force = force - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_entity_type, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def create_feature(self, - request: Union[featurestore_service.CreateFeatureRequest, dict] = None, - *, - parent: str = None, - feature: gca_feature.Feature = None, - feature_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a new Feature in a given EntityType. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateFeatureRequest, dict]): - The request object. Request message for - [FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1.FeaturestoreService.CreateFeature]. - parent (:class:`str`): - Required. The resource name of the EntityType to create - a Feature. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - feature (:class:`google.cloud.aiplatform_v1.types.Feature`): - Required. 
The Feature to create. - This corresponds to the ``feature`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - feature_id (:class:`str`): - Required. The ID to use for the Feature, which will - become the final component of the Feature's resource - name. - - This value may be up to 60 characters, and valid - characters are ``[a-z0-9_]``. The first character cannot - be a number. - - The value must be unique within an EntityType. - - This corresponds to the ``feature_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Feature` Feature Metadata information that describes an attribute of an entity type. - For example, apple is an entity type, and color is a - feature that describes apple. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, feature, feature_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.CreateFeatureRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - if feature is not None: - request.feature = feature - if feature_id is not None: - request.feature_id = feature_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_feature, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_feature.Feature, - metadata_type=featurestore_service.CreateFeatureOperationMetadata, - ) - - # Done; return the response. - return response - - async def batch_create_features(self, - request: Union[featurestore_service.BatchCreateFeaturesRequest, dict] = None, - *, - parent: str = None, - requests: Sequence[featurestore_service.CreateFeatureRequest] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a batch of Features in a given EntityType. - - Args: - request (Union[google.cloud.aiplatform_v1.types.BatchCreateFeaturesRequest, dict]): - The request object. Request message for - [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures]. - parent (:class:`str`): - Required. The resource name of the EntityType to create - the batch of Features under. 
Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - requests (:class:`Sequence[google.cloud.aiplatform_v1.types.CreateFeatureRequest]`): - Required. The request message specifying the Features to - create. All Features must be created under the same - parent EntityType. The ``parent`` field in each child - request message can be omitted. If ``parent`` is set in - a child request, then the value must match the - ``parent`` value in this request message. - - This corresponds to the ``requests`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.BatchCreateFeaturesResponse` - Response message for - [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, requests]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.BatchCreateFeaturesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - if requests: - request.requests.extend(requests) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.batch_create_features, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - featurestore_service.BatchCreateFeaturesResponse, - metadata_type=featurestore_service.BatchCreateFeaturesOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_feature(self, - request: Union[featurestore_service.GetFeatureRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> feature.Feature: - r"""Gets details of a single Feature. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetFeatureRequest, dict]): - The request object. Request message for - [FeaturestoreService.GetFeature][google.cloud.aiplatform.v1.FeaturestoreService.GetFeature]. - name (:class:`str`): - Required. The name of the Feature resource. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Feature: - Feature Metadata information that - describes an attribute of an entity - type. For example, apple is an entity - type, and color is a feature that - describes apple. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.GetFeatureRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_feature, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_features(self, - request: Union[featurestore_service.ListFeaturesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListFeaturesAsyncPager: - r"""Lists Features in a given EntityType. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListFeaturesRequest, dict]): - The request object. 
Request message for - [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures]. - parent (:class:`str`): - Required. The resource name of the Location to list - Features. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListFeaturesAsyncPager: - Response message for - [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.ListFeaturesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_features, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListFeaturesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_feature(self, - request: Union[featurestore_service.UpdateFeatureRequest, dict] = None, - *, - feature: gca_feature.Feature = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_feature.Feature: - r"""Updates the parameters of a single Feature. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateFeatureRequest, dict]): - The request object. Request message for - [FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeature]. - feature (:class:`google.cloud.aiplatform_v1.types.Feature`): - Required. The Feature's ``name`` field is used to - identify the Feature to be updated. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` - - This corresponds to the ``feature`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Field mask is used to specify the fields to be - overwritten in the Features resource by the update. The - fields specified in the update_mask are relative to the - resource, not the full request. A field will be - overwritten if it is in the mask. If the user does not - provide a mask then only the non-empty fields present in - the request will be overwritten. 
Set the update_mask to - ``*`` to override all fields. - - Updatable fields: - - - ``description`` - - ``labels`` - - ``monitoring_config.snapshot_analysis.disabled`` - - ``monitoring_config.snapshot_analysis.monitoring_interval`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Feature: - Feature Metadata information that - describes an attribute of an entity - type. For example, apple is an entity - type, and color is a feature that - describes apple. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([feature, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.UpdateFeatureRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if feature is not None: - request.feature = feature - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_feature, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("feature.name", request.feature.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_feature(self, - request: Union[featurestore_service.DeleteFeatureRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a single Feature. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteFeatureRequest, dict]): - The request object. Request message for - [FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeature]. - name (:class:`str`): - Required. The name of the Features to be deleted. - Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. 
For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.DeleteFeatureRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_feature, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def import_feature_values(self, - request: Union[featurestore_service.ImportFeatureValuesRequest, dict] = None, - *, - entity_type: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Imports Feature values into the Featurestore from a - source storage. 
- The progress of the import is tracked by the returned - operation. The imported features are guaranteed to be - visible to subsequent read operations after the - operation is marked as successfully done. - If an import operation fails, the Feature values - returned from reads and exports may be inconsistent. If - consistency is required, the caller must retry the same - import request again and wait till the new operation - returned is marked as successfully done. - There are also scenarios where the caller can cause - inconsistency. - - Source data for import contains multiple distinct - Feature values for the same entity ID and timestamp. - - Source is modified during an import. This includes - adding, updating, or removing source data and/or - metadata. Examples of updating metadata include but are - not limited to changing storage location, storage class, - or retention policy. - - Online serving cluster is under-provisioned. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ImportFeatureValuesRequest, dict]): - The request object. Request message for - [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ImportFeatureValues]. - entity_type (:class:`str`): - Required. The resource name of the EntityType grouping - the Features for which values are being imported. - Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`` - - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. 
- - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.ImportFeatureValuesResponse` - Response message for - [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ImportFeatureValues]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.ImportFeatureValuesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if entity_type is not None: - request.entity_type = entity_type - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.import_feature_values, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("entity_type", request.entity_type), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - featurestore_service.ImportFeatureValuesResponse, - metadata_type=featurestore_service.ImportFeatureValuesOperationMetadata, - ) - - # Done; return the response. 
- return response - - async def batch_read_feature_values(self, - request: Union[featurestore_service.BatchReadFeatureValuesRequest, dict] = None, - *, - featurestore: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Batch reads Feature values from a Featurestore. - This API enables batch reading Feature values, where - each read instance in the batch may read Feature values - of entities from one or more EntityTypes. Point-in-time - correctness is guaranteed for Feature values of each - read instance as of each instance's read timestamp. - - Args: - request (Union[google.cloud.aiplatform_v1.types.BatchReadFeatureValuesRequest, dict]): - The request object. Request message for - [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.BatchReadFeatureValues]. - featurestore (:class:`str`): - Required. The resource name of the Featurestore from - which to query Feature values. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``featurestore`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.BatchReadFeatureValuesResponse` - Response message for - [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.BatchReadFeatureValues]. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([featurestore]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.BatchReadFeatureValuesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if featurestore is not None: - request.featurestore = featurestore - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.batch_read_feature_values, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("featurestore", request.featurestore), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - featurestore_service.BatchReadFeatureValuesResponse, - metadata_type=featurestore_service.BatchReadFeatureValuesOperationMetadata, - ) - - # Done; return the response. - return response - - async def export_feature_values(self, - request: Union[featurestore_service.ExportFeatureValuesRequest, dict] = None, - *, - entity_type: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Exports Feature values from all the entities of a - target EntityType. 
- - Args: - request (Union[google.cloud.aiplatform_v1.types.ExportFeatureValuesRequest, dict]): - The request object. Request message for - [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ExportFeatureValues]. - entity_type (:class:`str`): - Required. The resource name of the EntityType from which - to export Feature values. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.ExportFeatureValuesResponse` - Response message for - [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ExportFeatureValues]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.ExportFeatureValuesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if entity_type is not None: - request.entity_type = entity_type - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.export_feature_values, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("entity_type", request.entity_type), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - featurestore_service.ExportFeatureValuesResponse, - metadata_type=featurestore_service.ExportFeatureValuesOperationMetadata, - ) - - # Done; return the response. - return response - - async def search_features(self, - request: Union[featurestore_service.SearchFeaturesRequest, dict] = None, - *, - location: str = None, - query: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.SearchFeaturesAsyncPager: - r"""Searches Features matching a query in a given - project. - - Args: - request (Union[google.cloud.aiplatform_v1.types.SearchFeaturesRequest, dict]): - The request object. Request message for - [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures]. - location (:class:`str`): - Required. The resource name of the Location to search - Features. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``location`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - query (:class:`str`): - Query string that is a conjunction of field-restricted - queries and/or field-restricted filters. - Field-restricted queries and filters can be combined - using ``AND`` to form a conjunction. - - A field query is in the form FIELD:QUERY. 
This - implicitly checks if QUERY exists as a substring within - Feature's FIELD. The QUERY and the FIELD are converted - to a sequence of words (i.e. tokens) for comparison. - This is done by: - - - Removing leading/trailing whitespace and tokenizing - the search value. Characters that are not one of - alphanumeric ``[a-zA-Z0-9]``, underscore ``_``, or - asterisk ``*`` are treated as delimiters for tokens. - ``*`` is treated as a wildcard that matches - characters within a token. - - Ignoring case. - - Prepending an asterisk to the first and appending an - asterisk to the last token in QUERY. - - A QUERY must be either a singular token or a phrase. A - phrase is one or multiple words enclosed in double - quotation marks ("). With phrases, the order of the - words is important. Words in the phrase must be matching - in order and consecutively. - - Supported FIELDs for field-restricted queries: - - - ``feature_id`` - - ``description`` - - ``entity_type_id`` - - Examples: - - - ``feature_id: foo`` --> Matches a Feature with ID - containing the substring ``foo`` (eg. ``foo``, - ``foofeature``, ``barfoo``). - - ``feature_id: foo*feature`` --> Matches a Feature - with ID containing the substring ``foo*feature`` (eg. - ``foobarfeature``). - - ``feature_id: foo AND description: bar`` --> Matches - a Feature with ID containing the substring ``foo`` - and description containing the substring ``bar``. - - Besides field queries, the following exact-match filters - are supported. The exact-match filters do not support - wildcards. Unlike field-restricted queries, exact-match - filters are case-sensitive. - - - ``feature_id``: Supports = comparisons. - - ``description``: Supports = comparisons. Multi-token - filters should be enclosed in quotes. - - ``entity_type_id``: Supports = comparisons. - - ``value_type``: Supports = and != comparisons. - - ``labels``: Supports key-value equality as well as - key presence. - - ``featurestore_id``: Supports = comparisons. 
- - Examples: - - - ``description = "foo bar"`` --> Any Feature with - description exactly equal to ``foo bar`` - - ``value_type = DOUBLE`` --> Features whose type is - DOUBLE. - - ``labels.active = yes AND labels.env = prod`` --> - Features having both (active: yes) and (env: prod) - labels. - - ``labels.env: *`` --> Any Feature which has a label - with ``env`` as the key. - - This corresponds to the ``query`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.featurestore_service.pagers.SearchFeaturesAsyncPager: - Response message for - [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([location, query]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.SearchFeaturesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if location is not None: - request.location = location - if query is not None: - request.query = query - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.search_features, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("location", request.location), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.SearchFeaturesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "FeaturestoreServiceAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/client.py deleted file mode 100644 index d250f03a90..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/client.py +++ /dev/null @@ -1,2438 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.featurestore_service import pagers -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import entity_type -from google.cloud.aiplatform_v1.types import entity_type as gca_entity_type -from google.cloud.aiplatform_v1.types import feature -from google.cloud.aiplatform_v1.types import feature as gca_feature -from google.cloud.aiplatform_v1.types import featurestore -from google.cloud.aiplatform_v1.types import featurestore as gca_featurestore -from google.cloud.aiplatform_v1.types import 
featurestore_service -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import FeaturestoreServiceGrpcTransport -from .transports.grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport - - -class FeaturestoreServiceClientMeta(type): - """Metaclass for the FeaturestoreService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[FeaturestoreServiceTransport]] - _transport_registry["grpc"] = FeaturestoreServiceGrpcTransport - _transport_registry["grpc_asyncio"] = FeaturestoreServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[FeaturestoreServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class FeaturestoreServiceClient(metaclass=FeaturestoreServiceClientMeta): - """The service that handles CRUD and List for resources for - Featurestore. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. 
- Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - FeaturestoreServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - FeaturestoreServiceClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> FeaturestoreServiceTransport: - """Returns the transport used by the client instance. - - Returns: - FeaturestoreServiceTransport: The transport used by the client - instance. - """ - return self._transport - - @staticmethod - def entity_type_path(project: str,location: str,featurestore: str,entity_type: str,) -> str: - """Returns a fully-qualified entity_type string.""" - return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, ) - - @staticmethod - def parse_entity_type_path(path: str) -> Dict[str,str]: - """Parses a entity_type path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)/entityTypes/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def feature_path(project: str,location: str,featurestore: str,entity_type: str,feature: str,) -> str: - """Returns a fully-qualified feature string.""" - return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, feature=feature, ) - - @staticmethod - def parse_feature_path(path: str) -> Dict[str,str]: - """Parses a feature path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)/entityTypes/(?P.+?)/features/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def featurestore_path(project: str,location: str,featurestore: str,) -> str: - """Returns a fully-qualified featurestore string.""" - return 
"projects/{project}/locations/{location}/featurestores/{featurestore}".format(project=project, location=location, featurestore=featurestore, ) - - @staticmethod - def parse_featurestore_path(path: str) -> Dict[str,str]: - """Parses a featurestore path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component 
segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, FeaturestoreServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the featurestore service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, FeaturestoreServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. 
- (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. 
- if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, FeaturestoreServiceTransport): - # transport is a FeaturestoreServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def create_featurestore(self, - request: Union[featurestore_service.CreateFeaturestoreRequest, dict] = None, - *, - parent: str = None, - featurestore: gca_featurestore.Featurestore = None, - featurestore_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates a new Featurestore in a given project and - location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateFeaturestoreRequest, dict]): - The request object. Request message for - [FeaturestoreService.CreateFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.CreateFeaturestore]. - parent (str): - Required. The resource name of the Location to create - Featurestores. Format: - ``projects/{project}/locations/{location}'`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - featurestore (google.cloud.aiplatform_v1.types.Featurestore): - Required. The Featurestore to create. - This corresponds to the ``featurestore`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - featurestore_id (str): - Required. The ID to use for this Featurestore, which - will become the final component of the Featurestore's - resource name. - - This value may be up to 60 characters, and valid - characters are ``[a-z0-9_]``. The first character cannot - be a number. - - The value must be unique within the project and - location. 
- - This corresponds to the ``featurestore_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing, - storing, and serving ML features. The Featurestore is - a top-level container for your features and their - values. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, featurestore, featurestore_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.CreateFeaturestoreRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.CreateFeaturestoreRequest): - request = featurestore_service.CreateFeaturestoreRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if featurestore is not None: - request.featurestore = featurestore - if featurestore_id is not None: - request.featurestore_id = featurestore_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.create_featurestore] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_featurestore.Featurestore, - metadata_type=featurestore_service.CreateFeaturestoreOperationMetadata, - ) - - # Done; return the response. - return response - - def get_featurestore(self, - request: Union[featurestore_service.GetFeaturestoreRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> featurestore.Featurestore: - r"""Gets details of a single Featurestore. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetFeaturestoreRequest, dict]): - The request object. Request message for - [FeaturestoreService.GetFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.GetFeaturestore]. - name (str): - Required. The name of the - Featurestore resource. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Featurestore: - Vertex AI Feature Store provides a - centralized repository for organizing, - storing, and serving ML features. The - Featurestore is a top-level container - for your features and their values. 
- - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.GetFeaturestoreRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.GetFeaturestoreRequest): - request = featurestore_service.GetFeaturestoreRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_featurestore] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_featurestores(self, - request: Union[featurestore_service.ListFeaturestoresRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListFeaturestoresPager: - r"""Lists Featurestores in a given project and location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListFeaturestoresRequest, dict]): - The request object. 
Request message for - [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores]. - parent (str): - Required. The resource name of the Location to list - Featurestores. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListFeaturestoresPager: - Response message for - [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.ListFeaturestoresRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.ListFeaturestoresRequest): - request = featurestore_service.ListFeaturestoresRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.list_featurestores] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListFeaturestoresPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_featurestore(self, - request: Union[featurestore_service.UpdateFeaturestoreRequest, dict] = None, - *, - featurestore: gca_featurestore.Featurestore = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Updates the parameters of a single Featurestore. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateFeaturestoreRequest, dict]): - The request object. Request message for - [FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeaturestore]. - featurestore (google.cloud.aiplatform_v1.types.Featurestore): - Required. The Featurestore's ``name`` field is used to - identify the Featurestore to be updated. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``featurestore`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Field mask is used to specify the fields to be - overwritten in the Featurestore resource by the update. - The fields specified in the update_mask are relative to - the resource, not the full request. 
A field will be - overwritten if it is in the mask. If the user does not - provide a mask then only the non-empty fields present in - the request will be overwritten. Set the update_mask to - ``*`` to override all fields. - - Updatable fields: - - - ``labels`` - - ``online_serving_config.fixed_node_count`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing, - storing, and serving ML features. The Featurestore is - a top-level container for your features and their - values. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([featurestore, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.UpdateFeaturestoreRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.UpdateFeaturestoreRequest): - request = featurestore_service.UpdateFeaturestoreRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if featurestore is not None: - request.featurestore = featurestore - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_featurestore] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("featurestore.name", request.featurestore.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_featurestore.Featurestore, - metadata_type=featurestore_service.UpdateFeaturestoreOperationMetadata, - ) - - # Done; return the response. - return response - - def delete_featurestore(self, - request: Union[featurestore_service.DeleteFeaturestoreRequest, dict] = None, - *, - name: str = None, - force: bool = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a single Featurestore. The Featurestore must not contain - any EntityTypes or ``force`` must be set to true for the request - to succeed. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteFeaturestoreRequest, dict]): - The request object. Request message for - [FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeaturestore]. - name (str): - Required. The name of the Featurestore to be deleted. - Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- force (bool): - If set to true, any EntityTypes and - Features for this Featurestore will also - be deleted. (Otherwise, the request will - only work if the Featurestore has no - EntityTypes.) - - This corresponds to the ``force`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, force]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.DeleteFeaturestoreRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.DeleteFeaturestoreRequest): - request = featurestore_service.DeleteFeaturestoreRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - if force is not None: - request.force = force - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_featurestore] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def create_entity_type(self, - request: Union[featurestore_service.CreateEntityTypeRequest, dict] = None, - *, - parent: str = None, - entity_type: gca_entity_type.EntityType = None, - entity_type_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates a new EntityType in a given Featurestore. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateEntityTypeRequest, dict]): - The request object. Request message for - [FeaturestoreService.CreateEntityType][google.cloud.aiplatform.v1.FeaturestoreService.CreateEntityType]. - parent (str): - Required. The resource name of the Featurestore to - create EntityTypes. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - entity_type (google.cloud.aiplatform_v1.types.EntityType): - The EntityType to create. 
- This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - entity_type_id (str): - Required. The ID to use for the EntityType, which will - become the final component of the EntityType's resource - name. - - This value may be up to 60 characters, and valid - characters are ``[a-z0-9_]``. The first character cannot - be a number. - - The value must be unique within a featurestore. - - This corresponds to the ``entity_type_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.EntityType` An entity type is a type of object in a system that needs to be modeled and - have stored information about. For example, driver is - an entity type, and driver0 is an instance of an - entity type driver. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, entity_type, entity_type_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.CreateEntityTypeRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, featurestore_service.CreateEntityTypeRequest): - request = featurestore_service.CreateEntityTypeRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if entity_type is not None: - request.entity_type = entity_type - if entity_type_id is not None: - request.entity_type_id = entity_type_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_entity_type] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_entity_type.EntityType, - metadata_type=featurestore_service.CreateEntityTypeOperationMetadata, - ) - - # Done; return the response. - return response - - def get_entity_type(self, - request: Union[featurestore_service.GetEntityTypeRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> entity_type.EntityType: - r"""Gets details of a single EntityType. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetEntityTypeRequest, dict]): - The request object. Request message for - [FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1.FeaturestoreService.GetEntityType]. - name (str): - Required. The name of the EntityType resource. 
Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.EntityType: - An entity type is a type of object in - a system that needs to be modeled and - have stored information about. For - example, driver is an entity type, and - driver0 is an instance of an entity type - driver. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.GetEntityTypeRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.GetEntityTypeRequest): - request = featurestore_service.GetEntityTypeRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_entity_type] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_entity_types(self, - request: Union[featurestore_service.ListEntityTypesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListEntityTypesPager: - r"""Lists EntityTypes in a given Featurestore. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListEntityTypesRequest, dict]): - The request object. Request message for - [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes]. - parent (str): - Required. The resource name of the Featurestore to list - EntityTypes. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListEntityTypesPager: - Response message for - [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.ListEntityTypesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.ListEntityTypesRequest): - request = featurestore_service.ListEntityTypesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_entity_types] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListEntityTypesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_entity_type(self, - request: Union[featurestore_service.UpdateEntityTypeRequest, dict] = None, - *, - entity_type: gca_entity_type.EntityType = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_entity_type.EntityType: - r"""Updates the parameters of a single EntityType. 
- - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateEntityTypeRequest, dict]): - The request object. Request message for - [FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1.FeaturestoreService.UpdateEntityType]. - entity_type (google.cloud.aiplatform_v1.types.EntityType): - Required. The EntityType's ``name`` field is used to - identify the EntityType to be updated. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Field mask is used to specify the fields to be - overwritten in the EntityType resource by the update. - The fields specified in the update_mask are relative to - the resource, not the full request. A field will be - overwritten if it is in the mask. If the user does not - provide a mask then only the non-empty fields present in - the request will be overwritten. Set the update_mask to - ``*`` to override all fields. - - Updatable fields: - - - ``description`` - - ``labels`` - - ``monitoring_config.snapshot_analysis.disabled`` - - ``monitoring_config.snapshot_analysis.monitoring_interval`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.EntityType: - An entity type is a type of object in - a system that needs to be modeled and - have stored information about. For - example, driver is an entity type, and - driver0 is an instance of an entity type - driver. 
- - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.UpdateEntityTypeRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.UpdateEntityTypeRequest): - request = featurestore_service.UpdateEntityTypeRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if entity_type is not None: - request.entity_type = entity_type - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_entity_type] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("entity_type.name", request.entity_type.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_entity_type(self, - request: Union[featurestore_service.DeleteEntityTypeRequest, dict] = None, - *, - name: str = None, - force: bool = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a single EntityType. 
The EntityType must not have any - Features or ``force`` must be set to true for the request to - succeed. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteEntityTypeRequest, dict]): - The request object. Request message for - [FeaturestoreService.DeleteEntityTypes][]. - name (str): - Required. The name of the EntityType to be deleted. - Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - force (bool): - If set to true, any Features for this - EntityType will also be deleted. - (Otherwise, the request will only work - if the EntityType has no Features.) - - This corresponds to the ``force`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name, force]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.DeleteEntityTypeRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.DeleteEntityTypeRequest): - request = featurestore_service.DeleteEntityTypeRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if force is not None: - request.force = force - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_entity_type] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def create_feature(self, - request: Union[featurestore_service.CreateFeatureRequest, dict] = None, - *, - parent: str = None, - feature: gca_feature.Feature = None, - feature_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates a new Feature in a given EntityType. 
- - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateFeatureRequest, dict]): - The request object. Request message for - [FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1.FeaturestoreService.CreateFeature]. - parent (str): - Required. The resource name of the EntityType to create - a Feature. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - feature (google.cloud.aiplatform_v1.types.Feature): - Required. The Feature to create. - This corresponds to the ``feature`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - feature_id (str): - Required. The ID to use for the Feature, which will - become the final component of the Feature's resource - name. - - This value may be up to 60 characters, and valid - characters are ``[a-z0-9_]``. The first character cannot - be a number. - - The value must be unique within an EntityType. - - This corresponds to the ``feature_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Feature` Feature Metadata information that describes an attribute of an entity type. - For example, apple is an entity type, and color is a - feature that describes apple. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, feature, feature_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.CreateFeatureRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.CreateFeatureRequest): - request = featurestore_service.CreateFeatureRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if feature is not None: - request.feature = feature - if feature_id is not None: - request.feature_id = feature_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_feature] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_feature.Feature, - metadata_type=featurestore_service.CreateFeatureOperationMetadata, - ) - - # Done; return the response. 
- return response - - def batch_create_features(self, - request: Union[featurestore_service.BatchCreateFeaturesRequest, dict] = None, - *, - parent: str = None, - requests: Sequence[featurestore_service.CreateFeatureRequest] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates a batch of Features in a given EntityType. - - Args: - request (Union[google.cloud.aiplatform_v1.types.BatchCreateFeaturesRequest, dict]): - The request object. Request message for - [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures]. - parent (str): - Required. The resource name of the EntityType to create - the batch of Features under. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - requests (Sequence[google.cloud.aiplatform_v1.types.CreateFeatureRequest]): - Required. The request message specifying the Features to - create. All Features must be created under the same - parent EntityType. The ``parent`` field in each child - request message can be omitted. If ``parent`` is set in - a child request, then the value must match the - ``parent`` value in this request message. - - This corresponds to the ``requests`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
- - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.BatchCreateFeaturesResponse` - Response message for - [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, requests]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.BatchCreateFeaturesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.BatchCreateFeaturesRequest): - request = featurestore_service.BatchCreateFeaturesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if requests is not None: - request.requests = requests - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.batch_create_features] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
- response = gac_operation.from_gapic( - response, - self._transport.operations_client, - featurestore_service.BatchCreateFeaturesResponse, - metadata_type=featurestore_service.BatchCreateFeaturesOperationMetadata, - ) - - # Done; return the response. - return response - - def get_feature(self, - request: Union[featurestore_service.GetFeatureRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> feature.Feature: - r"""Gets details of a single Feature. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetFeatureRequest, dict]): - The request object. Request message for - [FeaturestoreService.GetFeature][google.cloud.aiplatform.v1.FeaturestoreService.GetFeature]. - name (str): - Required. The name of the Feature resource. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Feature: - Feature Metadata information that - describes an attribute of an entity - type. For example, apple is an entity - type, and color is a feature that - describes apple. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.GetFeatureRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.GetFeatureRequest): - request = featurestore_service.GetFeatureRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_feature] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_features(self, - request: Union[featurestore_service.ListFeaturesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListFeaturesPager: - r"""Lists Features in a given EntityType. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListFeaturesRequest, dict]): - The request object. Request message for - [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures]. - parent (str): - Required. The resource name of the Location to list - Features. 
Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.featurestore_service.pagers.ListFeaturesPager: - Response message for - [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.ListFeaturesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.ListFeaturesRequest): - request = featurestore_service.ListFeaturesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_features] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListFeaturesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_feature(self, - request: Union[featurestore_service.UpdateFeatureRequest, dict] = None, - *, - feature: gca_feature.Feature = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_feature.Feature: - r"""Updates the parameters of a single Feature. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateFeatureRequest, dict]): - The request object. Request message for - [FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeature]. - feature (google.cloud.aiplatform_v1.types.Feature): - Required. The Feature's ``name`` field is used to - identify the Feature to be updated. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` - - This corresponds to the ``feature`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Field mask is used to specify the fields to be - overwritten in the Features resource by the update. The - fields specified in the update_mask are relative to the - resource, not the full request. A field will be - overwritten if it is in the mask. If the user does not - provide a mask then only the non-empty fields present in - the request will be overwritten. 
Set the update_mask to - ``*`` to override all fields. - - Updatable fields: - - - ``description`` - - ``labels`` - - ``monitoring_config.snapshot_analysis.disabled`` - - ``monitoring_config.snapshot_analysis.monitoring_interval`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Feature: - Feature Metadata information that - describes an attribute of an entity - type. For example, apple is an entity - type, and color is a feature that - describes apple. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([feature, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.UpdateFeatureRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.UpdateFeatureRequest): - request = featurestore_service.UpdateFeatureRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if feature is not None: - request.feature = feature - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.update_feature] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("feature.name", request.feature.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_feature(self, - request: Union[featurestore_service.DeleteFeatureRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a single Feature. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteFeatureRequest, dict]): - The request object. Request message for - [FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeature]. - name (str): - Required. The name of the Features to be deleted. - Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. 
For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.DeleteFeatureRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.DeleteFeatureRequest): - request = featurestore_service.DeleteFeatureRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_feature] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
- return response - - def import_feature_values(self, - request: Union[featurestore_service.ImportFeatureValuesRequest, dict] = None, - *, - entity_type: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Imports Feature values into the Featurestore from a - source storage. - The progress of the import is tracked by the returned - operation. The imported features are guaranteed to be - visible to subsequent read operations after the - operation is marked as successfully done. - If an import operation fails, the Feature values - returned from reads and exports may be inconsistent. If - consistency is required, the caller must retry the same - import request again and wait till the new operation - returned is marked as successfully done. - There are also scenarios where the caller can cause - inconsistency. - - Source data for import contains multiple distinct - Feature values for the same entity ID and timestamp. - - Source is modified during an import. This includes - adding, updating, or removing source data and/or - metadata. Examples of updating metadata include but are - not limited to changing storage location, storage class, - or retention policy. - - Online serving cluster is under-provisioned. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ImportFeatureValuesRequest, dict]): - The request object. Request message for - [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ImportFeatureValues]. - entity_type (str): - Required. The resource name of the EntityType grouping - the Features for which values are being imported. - Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`` - - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.ImportFeatureValuesResponse` - Response message for - [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ImportFeatureValues]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.ImportFeatureValuesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.ImportFeatureValuesRequest): - request = featurestore_service.ImportFeatureValuesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if entity_type is not None: - request.entity_type = entity_type - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.import_feature_values] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("entity_type", request.entity_type), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - featurestore_service.ImportFeatureValuesResponse, - metadata_type=featurestore_service.ImportFeatureValuesOperationMetadata, - ) - - # Done; return the response. - return response - - def batch_read_feature_values(self, - request: Union[featurestore_service.BatchReadFeatureValuesRequest, dict] = None, - *, - featurestore: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Batch reads Feature values from a Featurestore. - This API enables batch reading Feature values, where - each read instance in the batch may read Feature values - of entities from one or more EntityTypes. Point-in-time - correctness is guaranteed for Feature values of each - read instance as of each instance's read timestamp. - - Args: - request (Union[google.cloud.aiplatform_v1.types.BatchReadFeatureValuesRequest, dict]): - The request object. Request message for - [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.BatchReadFeatureValues]. - featurestore (str): - Required. The resource name of the Featurestore from - which to query Feature values. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``featurestore`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
- - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.BatchReadFeatureValuesResponse` - Response message for - [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.BatchReadFeatureValues]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([featurestore]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.BatchReadFeatureValuesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.BatchReadFeatureValuesRequest): - request = featurestore_service.BatchReadFeatureValuesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if featurestore is not None: - request.featurestore = featurestore - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.batch_read_feature_values] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("featurestore", request.featurestore), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
- response = gac_operation.from_gapic( - response, - self._transport.operations_client, - featurestore_service.BatchReadFeatureValuesResponse, - metadata_type=featurestore_service.BatchReadFeatureValuesOperationMetadata, - ) - - # Done; return the response. - return response - - def export_feature_values(self, - request: Union[featurestore_service.ExportFeatureValuesRequest, dict] = None, - *, - entity_type: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Exports Feature values from all the entities of a - target EntityType. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ExportFeatureValuesRequest, dict]): - The request object. Request message for - [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ExportFeatureValues]. - entity_type (str): - Required. The resource name of the EntityType from which - to export Feature values. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.ExportFeatureValuesResponse` - Response message for - [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ExportFeatureValues]. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.ExportFeatureValuesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.ExportFeatureValuesRequest): - request = featurestore_service.ExportFeatureValuesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if entity_type is not None: - request.entity_type = entity_type - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.export_feature_values] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("entity_type", request.entity_type), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - featurestore_service.ExportFeatureValuesResponse, - metadata_type=featurestore_service.ExportFeatureValuesOperationMetadata, - ) - - # Done; return the response. 
- return response - - def search_features(self, - request: Union[featurestore_service.SearchFeaturesRequest, dict] = None, - *, - location: str = None, - query: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.SearchFeaturesPager: - r"""Searches Features matching a query in a given - project. - - Args: - request (Union[google.cloud.aiplatform_v1.types.SearchFeaturesRequest, dict]): - The request object. Request message for - [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures]. - location (str): - Required. The resource name of the Location to search - Features. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``location`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - query (str): - Query string that is a conjunction of field-restricted - queries and/or field-restricted filters. - Field-restricted queries and filters can be combined - using ``AND`` to form a conjunction. - - A field query is in the form FIELD:QUERY. This - implicitly checks if QUERY exists as a substring within - Feature's FIELD. The QUERY and the FIELD are converted - to a sequence of words (i.e. tokens) for comparison. - This is done by: - - - Removing leading/trailing whitespace and tokenizing - the search value. Characters that are not one of - alphanumeric ``[a-zA-Z0-9]``, underscore ``_``, or - asterisk ``*`` are treated as delimiters for tokens. - ``*`` is treated as a wildcard that matches - characters within a token. - - Ignoring case. - - Prepending an asterisk to the first and appending an - asterisk to the last token in QUERY. - - A QUERY must be either a singular token or a phrase. A - phrase is one or multiple words enclosed in double - quotation marks ("). With phrases, the order of the - words is important. 
Words in the phrase must be matching - in order and consecutively. - - Supported FIELDs for field-restricted queries: - - - ``feature_id`` - - ``description`` - - ``entity_type_id`` - - Examples: - - - ``feature_id: foo`` --> Matches a Feature with ID - containing the substring ``foo`` (eg. ``foo``, - ``foofeature``, ``barfoo``). - - ``feature_id: foo*feature`` --> Matches a Feature - with ID containing the substring ``foo*feature`` (eg. - ``foobarfeature``). - - ``feature_id: foo AND description: bar`` --> Matches - a Feature with ID containing the substring ``foo`` - and description containing the substring ``bar``. - - Besides field queries, the following exact-match filters - are supported. The exact-match filters do not support - wildcards. Unlike field-restricted queries, exact-match - filters are case-sensitive. - - - ``feature_id``: Supports = comparisons. - - ``description``: Supports = comparisons. Multi-token - filters should be enclosed in quotes. - - ``entity_type_id``: Supports = comparisons. - - ``value_type``: Supports = and != comparisons. - - ``labels``: Supports key-value equality as well as - key presence. - - ``featurestore_id``: Supports = comparisons. - - Examples: - - - ``description = "foo bar"`` --> Any Feature with - description exactly equal to ``foo bar`` - - ``value_type = DOUBLE`` --> Features whose type is - DOUBLE. - - ``labels.active = yes AND labels.env = prod`` --> - Features having both (active: yes) and (env: prod) - labels. - - ``labels.env: *`` --> Any Feature which has a label - with ``env`` as the key. - - This corresponds to the ``query`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1.services.featurestore_service.pagers.SearchFeaturesPager: - Response message for - [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([location, query]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.SearchFeaturesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.SearchFeaturesRequest): - request = featurestore_service.SearchFeaturesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if location is not None: - request.location = location - if query is not None: - request.query = query - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.search_features] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("location", request.location), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. 
- response = pagers.SearchFeaturesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! - """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "FeaturestoreServiceClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/pagers.py deleted file mode 100644 index 6ae6466820..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/pagers.py +++ /dev/null @@ -1,509 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.aiplatform_v1.types import entity_type -from google.cloud.aiplatform_v1.types import feature -from google.cloud.aiplatform_v1.types import featurestore -from google.cloud.aiplatform_v1.types import featurestore_service - - -class ListFeaturestoresPager: - """A pager for iterating through ``list_featurestores`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListFeaturestoresResponse` object, and - provides an ``__iter__`` method to iterate through its - ``featurestores`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListFeaturestores`` requests and continue to iterate - through the ``featurestores`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListFeaturestoresResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., featurestore_service.ListFeaturestoresResponse], - request: featurestore_service.ListFeaturestoresRequest, - response: featurestore_service.ListFeaturestoresResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListFeaturestoresRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListFeaturestoresResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = featurestore_service.ListFeaturestoresRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[featurestore_service.ListFeaturestoresResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[featurestore.Featurestore]: - for page in self.pages: - yield from page.featurestores - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListFeaturestoresAsyncPager: - """A pager for iterating through ``list_featurestores`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListFeaturestoresResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``featurestores`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListFeaturestores`` requests and continue to iterate - through the ``featurestores`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListFeaturestoresResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[featurestore_service.ListFeaturestoresResponse]], - request: featurestore_service.ListFeaturestoresRequest, - response: featurestore_service.ListFeaturestoresResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. 
- request (google.cloud.aiplatform_v1.types.ListFeaturestoresRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListFeaturestoresResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = featurestore_service.ListFeaturestoresRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[featurestore_service.ListFeaturestoresResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[featurestore.Featurestore]: - async def async_generator(): - async for page in self.pages: - for response in page.featurestores: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListEntityTypesPager: - """A pager for iterating through ``list_entity_types`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListEntityTypesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``entity_types`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListEntityTypes`` requests and continue to iterate - through the ``entity_types`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListEntityTypesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., featurestore_service.ListEntityTypesResponse], - request: featurestore_service.ListEntityTypesRequest, - response: featurestore_service.ListEntityTypesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListEntityTypesRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListEntityTypesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = featurestore_service.ListEntityTypesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[featurestore_service.ListEntityTypesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[entity_type.EntityType]: - for page in self.pages: - yield from page.entity_types - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListEntityTypesAsyncPager: - """A pager for iterating through ``list_entity_types`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListEntityTypesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``entity_types`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListEntityTypes`` requests and continue to iterate - through the ``entity_types`` field on the - corresponding responses. 
- - All the usual :class:`google.cloud.aiplatform_v1.types.ListEntityTypesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[featurestore_service.ListEntityTypesResponse]], - request: featurestore_service.ListEntityTypesRequest, - response: featurestore_service.ListEntityTypesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListEntityTypesRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListEntityTypesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = featurestore_service.ListEntityTypesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[featurestore_service.ListEntityTypesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[entity_type.EntityType]: - async def async_generator(): - async for page in self.pages: - for response in page.entity_types: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListFeaturesPager: - """A pager for iterating through ``list_features`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListFeaturesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``features`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListFeatures`` requests and continue to iterate - through the ``features`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListFeaturesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., featurestore_service.ListFeaturesResponse], - request: featurestore_service.ListFeaturesRequest, - response: featurestore_service.ListFeaturesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListFeaturesRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListFeaturesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = featurestore_service.ListFeaturesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[featurestore_service.ListFeaturesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[feature.Feature]: - for page in self.pages: - yield from page.features - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListFeaturesAsyncPager: - """A pager for iterating through ``list_features`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListFeaturesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``features`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListFeatures`` requests and continue to iterate - through the ``features`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListFeaturesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[featurestore_service.ListFeaturesResponse]], - request: featurestore_service.ListFeaturesRequest, - response: featurestore_service.ListFeaturesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListFeaturesRequest): - The initial request object. 
- response (google.cloud.aiplatform_v1.types.ListFeaturesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = featurestore_service.ListFeaturesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[featurestore_service.ListFeaturesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[feature.Feature]: - async def async_generator(): - async for page in self.pages: - for response in page.features: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class SearchFeaturesPager: - """A pager for iterating through ``search_features`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.SearchFeaturesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``features`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``SearchFeatures`` requests and continue to iterate - through the ``features`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.SearchFeaturesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., featurestore_service.SearchFeaturesResponse], - request: featurestore_service.SearchFeaturesRequest, - response: featurestore_service.SearchFeaturesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.SearchFeaturesRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.SearchFeaturesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = featurestore_service.SearchFeaturesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[featurestore_service.SearchFeaturesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[feature.Feature]: - for page in self.pages: - yield from page.features - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class SearchFeaturesAsyncPager: - """A pager for iterating through ``search_features`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.SearchFeaturesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``features`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``SearchFeatures`` requests and continue to iterate - through the ``features`` field on the - corresponding responses. 
- - All the usual :class:`google.cloud.aiplatform_v1.types.SearchFeaturesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[featurestore_service.SearchFeaturesResponse]], - request: featurestore_service.SearchFeaturesRequest, - response: featurestore_service.SearchFeaturesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.SearchFeaturesRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.SearchFeaturesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = featurestore_service.SearchFeaturesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[featurestore_service.SearchFeaturesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[feature.Feature]: - async def async_generator(): - async for page in self.pages: - for response in page.features: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/transports/__init__.py 
b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/transports/__init__.py deleted file mode 100644 index e8a1ff1b03..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import FeaturestoreServiceTransport -from .grpc import FeaturestoreServiceGrpcTransport -from .grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. 
-_transport_registry = OrderedDict() # type: Dict[str, Type[FeaturestoreServiceTransport]] -_transport_registry['grpc'] = FeaturestoreServiceGrpcTransport -_transport_registry['grpc_asyncio'] = FeaturestoreServiceGrpcAsyncIOTransport - -__all__ = ( - 'FeaturestoreServiceTransport', - 'FeaturestoreServiceGrpcTransport', - 'FeaturestoreServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/transports/base.py deleted file mode 100644 index a9c826a5e6..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/transports/base.py +++ /dev/null @@ -1,424 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1.types import entity_type -from google.cloud.aiplatform_v1.types import entity_type as gca_entity_type -from google.cloud.aiplatform_v1.types import feature -from google.cloud.aiplatform_v1.types import feature as gca_feature -from google.cloud.aiplatform_v1.types import featurestore -from google.cloud.aiplatform_v1.types import featurestore_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class FeaturestoreServiceTransport(abc.ABC): - """Abstract transport class for FeaturestoreService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. 
- credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. 
- if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.create_featurestore: gapic_v1.method.wrap_method( - self.create_featurestore, - default_timeout=None, - client_info=client_info, - ), - self.get_featurestore: gapic_v1.method.wrap_method( - self.get_featurestore, - default_timeout=None, - client_info=client_info, - ), - self.list_featurestores: gapic_v1.method.wrap_method( - self.list_featurestores, - default_timeout=None, - client_info=client_info, - ), - self.update_featurestore: gapic_v1.method.wrap_method( - self.update_featurestore, - default_timeout=None, - client_info=client_info, - ), - self.delete_featurestore: gapic_v1.method.wrap_method( - self.delete_featurestore, - default_timeout=None, - client_info=client_info, - ), - self.create_entity_type: gapic_v1.method.wrap_method( - self.create_entity_type, - default_timeout=None, - client_info=client_info, - ), - self.get_entity_type: gapic_v1.method.wrap_method( - self.get_entity_type, - default_timeout=None, - client_info=client_info, - ), - self.list_entity_types: gapic_v1.method.wrap_method( - self.list_entity_types, - default_timeout=None, - client_info=client_info, - ), - self.update_entity_type: gapic_v1.method.wrap_method( - self.update_entity_type, - default_timeout=None, - client_info=client_info, - ), - self.delete_entity_type: gapic_v1.method.wrap_method( - self.delete_entity_type, - default_timeout=None, - client_info=client_info, - ), - self.create_feature: gapic_v1.method.wrap_method( - self.create_feature, - default_timeout=None, - client_info=client_info, - ), - self.batch_create_features: gapic_v1.method.wrap_method( - self.batch_create_features, - 
default_timeout=None, - client_info=client_info, - ), - self.get_feature: gapic_v1.method.wrap_method( - self.get_feature, - default_timeout=None, - client_info=client_info, - ), - self.list_features: gapic_v1.method.wrap_method( - self.list_features, - default_timeout=None, - client_info=client_info, - ), - self.update_feature: gapic_v1.method.wrap_method( - self.update_feature, - default_timeout=None, - client_info=client_info, - ), - self.delete_feature: gapic_v1.method.wrap_method( - self.delete_feature, - default_timeout=None, - client_info=client_info, - ), - self.import_feature_values: gapic_v1.method.wrap_method( - self.import_feature_values, - default_timeout=None, - client_info=client_info, - ), - self.batch_read_feature_values: gapic_v1.method.wrap_method( - self.batch_read_feature_values, - default_timeout=None, - client_info=client_info, - ), - self.export_feature_values: gapic_v1.method.wrap_method( - self.export_feature_values, - default_timeout=None, - client_info=client_info, - ), - self.search_features: gapic_v1.method.wrap_method( - self.search_features, - default_timeout=None, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! 
- """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_featurestore(self) -> Callable[ - [featurestore_service.CreateFeaturestoreRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_featurestore(self) -> Callable[ - [featurestore_service.GetFeaturestoreRequest], - Union[ - featurestore.Featurestore, - Awaitable[featurestore.Featurestore] - ]]: - raise NotImplementedError() - - @property - def list_featurestores(self) -> Callable[ - [featurestore_service.ListFeaturestoresRequest], - Union[ - featurestore_service.ListFeaturestoresResponse, - Awaitable[featurestore_service.ListFeaturestoresResponse] - ]]: - raise NotImplementedError() - - @property - def update_featurestore(self) -> Callable[ - [featurestore_service.UpdateFeaturestoreRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def delete_featurestore(self) -> Callable[ - [featurestore_service.DeleteFeaturestoreRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def create_entity_type(self) -> Callable[ - [featurestore_service.CreateEntityTypeRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_entity_type(self) -> Callable[ - [featurestore_service.GetEntityTypeRequest], - Union[ - entity_type.EntityType, - Awaitable[entity_type.EntityType] - ]]: - raise NotImplementedError() - - @property - def list_entity_types(self) -> Callable[ - [featurestore_service.ListEntityTypesRequest], - Union[ - featurestore_service.ListEntityTypesResponse, - Awaitable[featurestore_service.ListEntityTypesResponse] - ]]: - raise 
NotImplementedError() - - @property - def update_entity_type(self) -> Callable[ - [featurestore_service.UpdateEntityTypeRequest], - Union[ - gca_entity_type.EntityType, - Awaitable[gca_entity_type.EntityType] - ]]: - raise NotImplementedError() - - @property - def delete_entity_type(self) -> Callable[ - [featurestore_service.DeleteEntityTypeRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def create_feature(self) -> Callable[ - [featurestore_service.CreateFeatureRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def batch_create_features(self) -> Callable[ - [featurestore_service.BatchCreateFeaturesRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_feature(self) -> Callable[ - [featurestore_service.GetFeatureRequest], - Union[ - feature.Feature, - Awaitable[feature.Feature] - ]]: - raise NotImplementedError() - - @property - def list_features(self) -> Callable[ - [featurestore_service.ListFeaturesRequest], - Union[ - featurestore_service.ListFeaturesResponse, - Awaitable[featurestore_service.ListFeaturesResponse] - ]]: - raise NotImplementedError() - - @property - def update_feature(self) -> Callable[ - [featurestore_service.UpdateFeatureRequest], - Union[ - gca_feature.Feature, - Awaitable[gca_feature.Feature] - ]]: - raise NotImplementedError() - - @property - def delete_feature(self) -> Callable[ - [featurestore_service.DeleteFeatureRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def import_feature_values(self) -> Callable[ - [featurestore_service.ImportFeatureValuesRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def 
batch_read_feature_values(self) -> Callable[ - [featurestore_service.BatchReadFeatureValuesRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def export_feature_values(self) -> Callable[ - [featurestore_service.ExportFeatureValuesRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def search_features(self) -> Callable[ - [featurestore_service.SearchFeaturesRequest], - Union[ - featurestore_service.SearchFeaturesResponse, - Awaitable[featurestore_service.SearchFeaturesResponse] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'FeaturestoreServiceTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc.py deleted file mode 100644 index cc08a5d3c0..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc.py +++ /dev/null @@ -1,805 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1.types import entity_type -from google.cloud.aiplatform_v1.types import entity_type as gca_entity_type -from google.cloud.aiplatform_v1.types import feature -from google.cloud.aiplatform_v1.types import feature as gca_feature -from google.cloud.aiplatform_v1.types import featurestore -from google.cloud.aiplatform_v1.types import featurestore_service -from google.longrunning import operations_pb2 # type: ignore -from .base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO - - -class FeaturestoreServiceGrpcTransport(FeaturestoreServiceTransport): - """gRPC backend transport for FeaturestoreService. - - The service that handles CRUD and List for resources for - Featurestore. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. 
- ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_featurestore(self) -> Callable[ - [featurestore_service.CreateFeaturestoreRequest], - operations_pb2.Operation]: - r"""Return a callable for the create featurestore method over gRPC. - - Creates a new Featurestore in a given project and - location. 
- - Returns: - Callable[[~.CreateFeaturestoreRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_featurestore' not in self._stubs: - self._stubs['create_featurestore'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/CreateFeaturestore', - request_serializer=featurestore_service.CreateFeaturestoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_featurestore'] - - @property - def get_featurestore(self) -> Callable[ - [featurestore_service.GetFeaturestoreRequest], - featurestore.Featurestore]: - r"""Return a callable for the get featurestore method over gRPC. - - Gets details of a single Featurestore. - - Returns: - Callable[[~.GetFeaturestoreRequest], - ~.Featurestore]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_featurestore' not in self._stubs: - self._stubs['get_featurestore'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/GetFeaturestore', - request_serializer=featurestore_service.GetFeaturestoreRequest.serialize, - response_deserializer=featurestore.Featurestore.deserialize, - ) - return self._stubs['get_featurestore'] - - @property - def list_featurestores(self) -> Callable[ - [featurestore_service.ListFeaturestoresRequest], - featurestore_service.ListFeaturestoresResponse]: - r"""Return a callable for the list featurestores method over gRPC. - - Lists Featurestores in a given project and location. 
- - Returns: - Callable[[~.ListFeaturestoresRequest], - ~.ListFeaturestoresResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_featurestores' not in self._stubs: - self._stubs['list_featurestores'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/ListFeaturestores', - request_serializer=featurestore_service.ListFeaturestoresRequest.serialize, - response_deserializer=featurestore_service.ListFeaturestoresResponse.deserialize, - ) - return self._stubs['list_featurestores'] - - @property - def update_featurestore(self) -> Callable[ - [featurestore_service.UpdateFeaturestoreRequest], - operations_pb2.Operation]: - r"""Return a callable for the update featurestore method over gRPC. - - Updates the parameters of a single Featurestore. - - Returns: - Callable[[~.UpdateFeaturestoreRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_featurestore' not in self._stubs: - self._stubs['update_featurestore'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/UpdateFeaturestore', - request_serializer=featurestore_service.UpdateFeaturestoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_featurestore'] - - @property - def delete_featurestore(self) -> Callable[ - [featurestore_service.DeleteFeaturestoreRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete featurestore method over gRPC. - - Deletes a single Featurestore. 
The Featurestore must not contain - any EntityTypes or ``force`` must be set to true for the request - to succeed. - - Returns: - Callable[[~.DeleteFeaturestoreRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_featurestore' not in self._stubs: - self._stubs['delete_featurestore'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/DeleteFeaturestore', - request_serializer=featurestore_service.DeleteFeaturestoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_featurestore'] - - @property - def create_entity_type(self) -> Callable[ - [featurestore_service.CreateEntityTypeRequest], - operations_pb2.Operation]: - r"""Return a callable for the create entity type method over gRPC. - - Creates a new EntityType in a given Featurestore. - - Returns: - Callable[[~.CreateEntityTypeRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_entity_type' not in self._stubs: - self._stubs['create_entity_type'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/CreateEntityType', - request_serializer=featurestore_service.CreateEntityTypeRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_entity_type'] - - @property - def get_entity_type(self) -> Callable[ - [featurestore_service.GetEntityTypeRequest], - entity_type.EntityType]: - r"""Return a callable for the get entity type method over gRPC. - - Gets details of a single EntityType. - - Returns: - Callable[[~.GetEntityTypeRequest], - ~.EntityType]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_entity_type' not in self._stubs: - self._stubs['get_entity_type'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/GetEntityType', - request_serializer=featurestore_service.GetEntityTypeRequest.serialize, - response_deserializer=entity_type.EntityType.deserialize, - ) - return self._stubs['get_entity_type'] - - @property - def list_entity_types(self) -> Callable[ - [featurestore_service.ListEntityTypesRequest], - featurestore_service.ListEntityTypesResponse]: - r"""Return a callable for the list entity types method over gRPC. - - Lists EntityTypes in a given Featurestore. - - Returns: - Callable[[~.ListEntityTypesRequest], - ~.ListEntityTypesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_entity_types' not in self._stubs: - self._stubs['list_entity_types'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/ListEntityTypes', - request_serializer=featurestore_service.ListEntityTypesRequest.serialize, - response_deserializer=featurestore_service.ListEntityTypesResponse.deserialize, - ) - return self._stubs['list_entity_types'] - - @property - def update_entity_type(self) -> Callable[ - [featurestore_service.UpdateEntityTypeRequest], - gca_entity_type.EntityType]: - r"""Return a callable for the update entity type method over gRPC. - - Updates the parameters of a single EntityType. - - Returns: - Callable[[~.UpdateEntityTypeRequest], - ~.EntityType]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_entity_type' not in self._stubs: - self._stubs['update_entity_type'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/UpdateEntityType', - request_serializer=featurestore_service.UpdateEntityTypeRequest.serialize, - response_deserializer=gca_entity_type.EntityType.deserialize, - ) - return self._stubs['update_entity_type'] - - @property - def delete_entity_type(self) -> Callable[ - [featurestore_service.DeleteEntityTypeRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete entity type method over gRPC. - - Deletes a single EntityType. The EntityType must not have any - Features or ``force`` must be set to true for the request to - succeed. - - Returns: - Callable[[~.DeleteEntityTypeRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_entity_type' not in self._stubs: - self._stubs['delete_entity_type'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/DeleteEntityType', - request_serializer=featurestore_service.DeleteEntityTypeRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_entity_type'] - - @property - def create_feature(self) -> Callable[ - [featurestore_service.CreateFeatureRequest], - operations_pb2.Operation]: - r"""Return a callable for the create feature method over gRPC. - - Creates a new Feature in a given EntityType. - - Returns: - Callable[[~.CreateFeatureRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_feature' not in self._stubs: - self._stubs['create_feature'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/CreateFeature', - request_serializer=featurestore_service.CreateFeatureRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_feature'] - - @property - def batch_create_features(self) -> Callable[ - [featurestore_service.BatchCreateFeaturesRequest], - operations_pb2.Operation]: - r"""Return a callable for the batch create features method over gRPC. - - Creates a batch of Features in a given EntityType. - - Returns: - Callable[[~.BatchCreateFeaturesRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_create_features' not in self._stubs: - self._stubs['batch_create_features'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/BatchCreateFeatures', - request_serializer=featurestore_service.BatchCreateFeaturesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['batch_create_features'] - - @property - def get_feature(self) -> Callable[ - [featurestore_service.GetFeatureRequest], - feature.Feature]: - r"""Return a callable for the get feature method over gRPC. - - Gets details of a single Feature. - - Returns: - Callable[[~.GetFeatureRequest], - ~.Feature]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_feature' not in self._stubs: - self._stubs['get_feature'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/GetFeature', - request_serializer=featurestore_service.GetFeatureRequest.serialize, - response_deserializer=feature.Feature.deserialize, - ) - return self._stubs['get_feature'] - - @property - def list_features(self) -> Callable[ - [featurestore_service.ListFeaturesRequest], - featurestore_service.ListFeaturesResponse]: - r"""Return a callable for the list features method over gRPC. - - Lists Features in a given EntityType. - - Returns: - Callable[[~.ListFeaturesRequest], - ~.ListFeaturesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_features' not in self._stubs: - self._stubs['list_features'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/ListFeatures', - request_serializer=featurestore_service.ListFeaturesRequest.serialize, - response_deserializer=featurestore_service.ListFeaturesResponse.deserialize, - ) - return self._stubs['list_features'] - - @property - def update_feature(self) -> Callable[ - [featurestore_service.UpdateFeatureRequest], - gca_feature.Feature]: - r"""Return a callable for the update feature method over gRPC. - - Updates the parameters of a single Feature. - - Returns: - Callable[[~.UpdateFeatureRequest], - ~.Feature]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_feature' not in self._stubs: - self._stubs['update_feature'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/UpdateFeature', - request_serializer=featurestore_service.UpdateFeatureRequest.serialize, - response_deserializer=gca_feature.Feature.deserialize, - ) - return self._stubs['update_feature'] - - @property - def delete_feature(self) -> Callable[ - [featurestore_service.DeleteFeatureRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete feature method over gRPC. - - Deletes a single Feature. - - Returns: - Callable[[~.DeleteFeatureRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_feature' not in self._stubs: - self._stubs['delete_feature'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/DeleteFeature', - request_serializer=featurestore_service.DeleteFeatureRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_feature'] - - @property - def import_feature_values(self) -> Callable[ - [featurestore_service.ImportFeatureValuesRequest], - operations_pb2.Operation]: - r"""Return a callable for the import feature values method over gRPC. - - Imports Feature values into the Featurestore from a - source storage. - The progress of the import is tracked by the returned - operation. The imported features are guaranteed to be - visible to subsequent read operations after the - operation is marked as successfully done. - If an import operation fails, the Feature values - returned from reads and exports may be inconsistent. If - consistency is required, the caller must retry the same - import request again and wait till the new operation - returned is marked as successfully done. - There are also scenarios where the caller can cause - inconsistency. - - Source data for import contains multiple distinct - Feature values for the same entity ID and timestamp. - - Source is modified during an import. This includes - adding, updating, or removing source data and/or - metadata. Examples of updating metadata include but are - not limited to changing storage location, storage class, - or retention policy. - - Online serving cluster is under-provisioned. - - Returns: - Callable[[~.ImportFeatureValuesRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'import_feature_values' not in self._stubs: - self._stubs['import_feature_values'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/ImportFeatureValues', - request_serializer=featurestore_service.ImportFeatureValuesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['import_feature_values'] - - @property - def batch_read_feature_values(self) -> Callable[ - [featurestore_service.BatchReadFeatureValuesRequest], - operations_pb2.Operation]: - r"""Return a callable for the batch read feature values method over gRPC. - - Batch reads Feature values from a Featurestore. - This API enables batch reading Feature values, where - each read instance in the batch may read Feature values - of entities from one or more EntityTypes. Point-in-time - correctness is guaranteed for Feature values of each - read instance as of each instance's read timestamp. - - Returns: - Callable[[~.BatchReadFeatureValuesRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_read_feature_values' not in self._stubs: - self._stubs['batch_read_feature_values'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/BatchReadFeatureValues', - request_serializer=featurestore_service.BatchReadFeatureValuesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['batch_read_feature_values'] - - @property - def export_feature_values(self) -> Callable[ - [featurestore_service.ExportFeatureValuesRequest], - operations_pb2.Operation]: - r"""Return a callable for the export feature values method over gRPC. - - Exports Feature values from all the entities of a - target EntityType. 
- - Returns: - Callable[[~.ExportFeatureValuesRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'export_feature_values' not in self._stubs: - self._stubs['export_feature_values'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/ExportFeatureValues', - request_serializer=featurestore_service.ExportFeatureValuesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_feature_values'] - - @property - def search_features(self) -> Callable[ - [featurestore_service.SearchFeaturesRequest], - featurestore_service.SearchFeaturesResponse]: - r"""Return a callable for the search features method over gRPC. - - Searches Features matching a query in a given - project. - - Returns: - Callable[[~.SearchFeaturesRequest], - ~.SearchFeaturesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'search_features' not in self._stubs: - self._stubs['search_features'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/SearchFeatures', - request_serializer=featurestore_service.SearchFeaturesRequest.serialize, - response_deserializer=featurestore_service.SearchFeaturesResponse.deserialize, - ) - return self._stubs['search_features'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'FeaturestoreServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc_asyncio.py deleted file mode 100644 index d839bc73d2..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/featurestore_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,809 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1.types import entity_type -from google.cloud.aiplatform_v1.types import entity_type as gca_entity_type -from google.cloud.aiplatform_v1.types import feature -from google.cloud.aiplatform_v1.types import feature as gca_feature -from google.cloud.aiplatform_v1.types import featurestore -from google.cloud.aiplatform_v1.types import featurestore_service -from google.longrunning import operations_pb2 # type: ignore -from .base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import FeaturestoreServiceGrpcTransport - - -class FeaturestoreServiceGrpcAsyncIOTransport(FeaturestoreServiceTransport): - """gRPC AsyncIO backend transport for FeaturestoreService. - - The service that handles CRUD and List for resources for - Featurestore. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. 
- Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. 
- credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. 
If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. 
- if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_featurestore(self) -> Callable[ - [featurestore_service.CreateFeaturestoreRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create featurestore method over gRPC. - - Creates a new Featurestore in a given project and - location. - - Returns: - Callable[[~.CreateFeaturestoreRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_featurestore' not in self._stubs: - self._stubs['create_featurestore'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/CreateFeaturestore', - request_serializer=featurestore_service.CreateFeaturestoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_featurestore'] - - @property - def get_featurestore(self) -> Callable[ - [featurestore_service.GetFeaturestoreRequest], - Awaitable[featurestore.Featurestore]]: - r"""Return a callable for the get featurestore method over gRPC. - - Gets details of a single Featurestore. - - Returns: - Callable[[~.GetFeaturestoreRequest], - Awaitable[~.Featurestore]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_featurestore' not in self._stubs: - self._stubs['get_featurestore'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/GetFeaturestore', - request_serializer=featurestore_service.GetFeaturestoreRequest.serialize, - response_deserializer=featurestore.Featurestore.deserialize, - ) - return self._stubs['get_featurestore'] - - @property - def list_featurestores(self) -> Callable[ - [featurestore_service.ListFeaturestoresRequest], - Awaitable[featurestore_service.ListFeaturestoresResponse]]: - r"""Return a callable for the list featurestores method over gRPC. - - Lists Featurestores in a given project and location. - - Returns: - Callable[[~.ListFeaturestoresRequest], - Awaitable[~.ListFeaturestoresResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_featurestores' not in self._stubs: - self._stubs['list_featurestores'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/ListFeaturestores', - request_serializer=featurestore_service.ListFeaturestoresRequest.serialize, - response_deserializer=featurestore_service.ListFeaturestoresResponse.deserialize, - ) - return self._stubs['list_featurestores'] - - @property - def update_featurestore(self) -> Callable[ - [featurestore_service.UpdateFeaturestoreRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the update featurestore method over gRPC. - - Updates the parameters of a single Featurestore. - - Returns: - Callable[[~.UpdateFeaturestoreRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_featurestore' not in self._stubs: - self._stubs['update_featurestore'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/UpdateFeaturestore', - request_serializer=featurestore_service.UpdateFeaturestoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_featurestore'] - - @property - def delete_featurestore(self) -> Callable[ - [featurestore_service.DeleteFeaturestoreRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete featurestore method over gRPC. - - Deletes a single Featurestore. The Featurestore must not contain - any EntityTypes or ``force`` must be set to true for the request - to succeed. - - Returns: - Callable[[~.DeleteFeaturestoreRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_featurestore' not in self._stubs: - self._stubs['delete_featurestore'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/DeleteFeaturestore', - request_serializer=featurestore_service.DeleteFeaturestoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_featurestore'] - - @property - def create_entity_type(self) -> Callable[ - [featurestore_service.CreateEntityTypeRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create entity type method over gRPC. - - Creates a new EntityType in a given Featurestore. 
- - Returns: - Callable[[~.CreateEntityTypeRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_entity_type' not in self._stubs: - self._stubs['create_entity_type'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/CreateEntityType', - request_serializer=featurestore_service.CreateEntityTypeRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_entity_type'] - - @property - def get_entity_type(self) -> Callable[ - [featurestore_service.GetEntityTypeRequest], - Awaitable[entity_type.EntityType]]: - r"""Return a callable for the get entity type method over gRPC. - - Gets details of a single EntityType. - - Returns: - Callable[[~.GetEntityTypeRequest], - Awaitable[~.EntityType]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_entity_type' not in self._stubs: - self._stubs['get_entity_type'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/GetEntityType', - request_serializer=featurestore_service.GetEntityTypeRequest.serialize, - response_deserializer=entity_type.EntityType.deserialize, - ) - return self._stubs['get_entity_type'] - - @property - def list_entity_types(self) -> Callable[ - [featurestore_service.ListEntityTypesRequest], - Awaitable[featurestore_service.ListEntityTypesResponse]]: - r"""Return a callable for the list entity types method over gRPC. - - Lists EntityTypes in a given Featurestore. 
- - Returns: - Callable[[~.ListEntityTypesRequest], - Awaitable[~.ListEntityTypesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_entity_types' not in self._stubs: - self._stubs['list_entity_types'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/ListEntityTypes', - request_serializer=featurestore_service.ListEntityTypesRequest.serialize, - response_deserializer=featurestore_service.ListEntityTypesResponse.deserialize, - ) - return self._stubs['list_entity_types'] - - @property - def update_entity_type(self) -> Callable[ - [featurestore_service.UpdateEntityTypeRequest], - Awaitable[gca_entity_type.EntityType]]: - r"""Return a callable for the update entity type method over gRPC. - - Updates the parameters of a single EntityType. - - Returns: - Callable[[~.UpdateEntityTypeRequest], - Awaitable[~.EntityType]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_entity_type' not in self._stubs: - self._stubs['update_entity_type'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/UpdateEntityType', - request_serializer=featurestore_service.UpdateEntityTypeRequest.serialize, - response_deserializer=gca_entity_type.EntityType.deserialize, - ) - return self._stubs['update_entity_type'] - - @property - def delete_entity_type(self) -> Callable[ - [featurestore_service.DeleteEntityTypeRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete entity type method over gRPC. 
- - Deletes a single EntityType. The EntityType must not have any - Features or ``force`` must be set to true for the request to - succeed. - - Returns: - Callable[[~.DeleteEntityTypeRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_entity_type' not in self._stubs: - self._stubs['delete_entity_type'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/DeleteEntityType', - request_serializer=featurestore_service.DeleteEntityTypeRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_entity_type'] - - @property - def create_feature(self) -> Callable[ - [featurestore_service.CreateFeatureRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create feature method over gRPC. - - Creates a new Feature in a given EntityType. - - Returns: - Callable[[~.CreateFeatureRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_feature' not in self._stubs: - self._stubs['create_feature'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/CreateFeature', - request_serializer=featurestore_service.CreateFeatureRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_feature'] - - @property - def batch_create_features(self) -> Callable[ - [featurestore_service.BatchCreateFeaturesRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the batch create features method over gRPC. - - Creates a batch of Features in a given EntityType. - - Returns: - Callable[[~.BatchCreateFeaturesRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_create_features' not in self._stubs: - self._stubs['batch_create_features'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/BatchCreateFeatures', - request_serializer=featurestore_service.BatchCreateFeaturesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['batch_create_features'] - - @property - def get_feature(self) -> Callable[ - [featurestore_service.GetFeatureRequest], - Awaitable[feature.Feature]]: - r"""Return a callable for the get feature method over gRPC. - - Gets details of a single Feature. - - Returns: - Callable[[~.GetFeatureRequest], - Awaitable[~.Feature]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_feature' not in self._stubs: - self._stubs['get_feature'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/GetFeature', - request_serializer=featurestore_service.GetFeatureRequest.serialize, - response_deserializer=feature.Feature.deserialize, - ) - return self._stubs['get_feature'] - - @property - def list_features(self) -> Callable[ - [featurestore_service.ListFeaturesRequest], - Awaitable[featurestore_service.ListFeaturesResponse]]: - r"""Return a callable for the list features method over gRPC. - - Lists Features in a given EntityType. - - Returns: - Callable[[~.ListFeaturesRequest], - Awaitable[~.ListFeaturesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_features' not in self._stubs: - self._stubs['list_features'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/ListFeatures', - request_serializer=featurestore_service.ListFeaturesRequest.serialize, - response_deserializer=featurestore_service.ListFeaturesResponse.deserialize, - ) - return self._stubs['list_features'] - - @property - def update_feature(self) -> Callable[ - [featurestore_service.UpdateFeatureRequest], - Awaitable[gca_feature.Feature]]: - r"""Return a callable for the update feature method over gRPC. - - Updates the parameters of a single Feature. - - Returns: - Callable[[~.UpdateFeatureRequest], - Awaitable[~.Feature]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_feature' not in self._stubs: - self._stubs['update_feature'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/UpdateFeature', - request_serializer=featurestore_service.UpdateFeatureRequest.serialize, - response_deserializer=gca_feature.Feature.deserialize, - ) - return self._stubs['update_feature'] - - @property - def delete_feature(self) -> Callable[ - [featurestore_service.DeleteFeatureRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete feature method over gRPC. - - Deletes a single Feature. - - Returns: - Callable[[~.DeleteFeatureRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_feature' not in self._stubs: - self._stubs['delete_feature'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/DeleteFeature', - request_serializer=featurestore_service.DeleteFeatureRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_feature'] - - @property - def import_feature_values(self) -> Callable[ - [featurestore_service.ImportFeatureValuesRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the import feature values method over gRPC. - - Imports Feature values into the Featurestore from a - source storage. - The progress of the import is tracked by the returned - operation. The imported features are guaranteed to be - visible to subsequent read operations after the - operation is marked as successfully done. - If an import operation fails, the Feature values - returned from reads and exports may be inconsistent. 
If - consistency is required, the caller must retry the same - import request again and wait till the new operation - returned is marked as successfully done. - There are also scenarios where the caller can cause - inconsistency. - - Source data for import contains multiple distinct - Feature values for the same entity ID and timestamp. - - Source is modified during an import. This includes - adding, updating, or removing source data and/or - metadata. Examples of updating metadata include but are - not limited to changing storage location, storage class, - or retention policy. - - Online serving cluster is under-provisioned. - - Returns: - Callable[[~.ImportFeatureValuesRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'import_feature_values' not in self._stubs: - self._stubs['import_feature_values'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/ImportFeatureValues', - request_serializer=featurestore_service.ImportFeatureValuesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['import_feature_values'] - - @property - def batch_read_feature_values(self) -> Callable[ - [featurestore_service.BatchReadFeatureValuesRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the batch read feature values method over gRPC. - - Batch reads Feature values from a Featurestore. - This API enables batch reading Feature values, where - each read instance in the batch may read Feature values - of entities from one or more EntityTypes. Point-in-time - correctness is guaranteed for Feature values of each - read instance as of each instance's read timestamp. 
- - Returns: - Callable[[~.BatchReadFeatureValuesRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_read_feature_values' not in self._stubs: - self._stubs['batch_read_feature_values'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/BatchReadFeatureValues', - request_serializer=featurestore_service.BatchReadFeatureValuesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['batch_read_feature_values'] - - @property - def export_feature_values(self) -> Callable[ - [featurestore_service.ExportFeatureValuesRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the export feature values method over gRPC. - - Exports Feature values from all the entities of a - target EntityType. - - Returns: - Callable[[~.ExportFeatureValuesRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'export_feature_values' not in self._stubs: - self._stubs['export_feature_values'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/ExportFeatureValues', - request_serializer=featurestore_service.ExportFeatureValuesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_feature_values'] - - @property - def search_features(self) -> Callable[ - [featurestore_service.SearchFeaturesRequest], - Awaitable[featurestore_service.SearchFeaturesResponse]]: - r"""Return a callable for the search features method over gRPC. - - Searches Features matching a query in a given - project. - - Returns: - Callable[[~.SearchFeaturesRequest], - Awaitable[~.SearchFeaturesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'search_features' not in self._stubs: - self._stubs['search_features'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.FeaturestoreService/SearchFeatures', - request_serializer=featurestore_service.SearchFeaturesRequest.serialize, - response_deserializer=featurestore_service.SearchFeaturesResponse.deserialize, - ) - return self._stubs['search_features'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'FeaturestoreServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/__init__.py deleted file mode 100644 index fb5d596b18..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import IndexEndpointServiceClient -from .async_client import IndexEndpointServiceAsyncClient - -__all__ = ( - 'IndexEndpointServiceClient', - 'IndexEndpointServiceAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py deleted file mode 100644 index af58334dc4..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/async_client.py +++ /dev/null @@ -1,925 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.index_endpoint_service import pagers -from google.cloud.aiplatform_v1.types import index_endpoint -from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint -from google.cloud.aiplatform_v1.types import index_endpoint_service -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import IndexEndpointServiceGrpcAsyncIOTransport -from .client import IndexEndpointServiceClient - - -class IndexEndpointServiceAsyncClient: - """A service for managing Vertex AI's IndexEndpoints.""" - - _client: IndexEndpointServiceClient - - DEFAULT_ENDPOINT = IndexEndpointServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = IndexEndpointServiceClient.DEFAULT_MTLS_ENDPOINT - - index_path = staticmethod(IndexEndpointServiceClient.index_path) - parse_index_path = staticmethod(IndexEndpointServiceClient.parse_index_path) - index_endpoint_path = 
staticmethod(IndexEndpointServiceClient.index_endpoint_path) - parse_index_endpoint_path = staticmethod(IndexEndpointServiceClient.parse_index_endpoint_path) - common_billing_account_path = staticmethod(IndexEndpointServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(IndexEndpointServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(IndexEndpointServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(IndexEndpointServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(IndexEndpointServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(IndexEndpointServiceClient.parse_common_organization_path) - common_project_path = staticmethod(IndexEndpointServiceClient.common_project_path) - parse_common_project_path = staticmethod(IndexEndpointServiceClient.parse_common_project_path) - common_location_path = staticmethod(IndexEndpointServiceClient.common_location_path) - parse_common_location_path = staticmethod(IndexEndpointServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - IndexEndpointServiceAsyncClient: The constructed client. - """ - return IndexEndpointServiceClient.from_service_account_info.__func__(IndexEndpointServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. 
- kwargs: Additional arguments to pass to the constructor. - - Returns: - IndexEndpointServiceAsyncClient: The constructed client. - """ - return IndexEndpointServiceClient.from_service_account_file.__func__(IndexEndpointServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> IndexEndpointServiceTransport: - """Returns the transport used by the client instance. - - Returns: - IndexEndpointServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(IndexEndpointServiceClient).get_transport_class, type(IndexEndpointServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, IndexEndpointServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the index endpoint service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.IndexEndpointServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = IndexEndpointServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_index_endpoint(self, - request: Union[index_endpoint_service.CreateIndexEndpointRequest, dict] = None, - *, - parent: str = None, - index_endpoint: gca_index_endpoint.IndexEndpoint = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates an IndexEndpoint. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateIndexEndpointRequest, dict]): - The request object. Request message for - [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1.IndexEndpointService.CreateIndexEndpoint]. - parent (:class:`str`): - Required. The resource name of the Location to create - the IndexEndpoint in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- index_endpoint (:class:`google.cloud.aiplatform_v1.types.IndexEndpoint`): - Required. The IndexEndpoint to - create. - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.IndexEndpoint` Indexes are deployed into it. An IndexEndpoint can have multiple - DeployedIndexes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, index_endpoint]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_endpoint_service.CreateIndexEndpointRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if index_endpoint is not None: - request.index_endpoint = index_endpoint - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_index_endpoint, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_index_endpoint.IndexEndpoint, - metadata_type=index_endpoint_service.CreateIndexEndpointOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_index_endpoint(self, - request: Union[index_endpoint_service.GetIndexEndpointRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> index_endpoint.IndexEndpoint: - r"""Gets an IndexEndpoint. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetIndexEndpointRequest, dict]): - The request object. Request message for - [IndexEndpointService.GetIndexEndpoint][google.cloud.aiplatform.v1.IndexEndpointService.GetIndexEndpoint] - name (:class:`str`): - Required. The name of the IndexEndpoint resource. - Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.IndexEndpoint: - Indexes are deployed into it. An - IndexEndpoint can have multiple - DeployedIndexes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_endpoint_service.GetIndexEndpointRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_index_endpoint, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_index_endpoints(self, - request: Union[index_endpoint_service.ListIndexEndpointsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListIndexEndpointsAsyncPager: - r"""Lists IndexEndpoints in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListIndexEndpointsRequest, dict]): - The request object. Request message for - [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1.IndexEndpointService.ListIndexEndpoints]. - parent (:class:`str`): - Required. The resource name of the Location from which - to list the IndexEndpoints. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.index_endpoint_service.pagers.ListIndexEndpointsAsyncPager: - Response message for - [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1.IndexEndpointService.ListIndexEndpoints]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_endpoint_service.ListIndexEndpointsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_index_endpoints, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. 
- response = pagers.ListIndexEndpointsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_index_endpoint(self, - request: Union[index_endpoint_service.UpdateIndexEndpointRequest, dict] = None, - *, - index_endpoint: gca_index_endpoint.IndexEndpoint = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_index_endpoint.IndexEndpoint: - r"""Updates an IndexEndpoint. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateIndexEndpointRequest, dict]): - The request object. Request message for - [IndexEndpointService.UpdateIndexEndpoint][google.cloud.aiplatform.v1.IndexEndpointService.UpdateIndexEndpoint]. - index_endpoint (:class:`google.cloud.aiplatform_v1.types.IndexEndpoint`): - Required. The IndexEndpoint which - replaces the resource on the server. - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The update mask applies to the resource. See - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.IndexEndpoint: - Indexes are deployed into it. An - IndexEndpoint can have multiple - DeployedIndexes. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([index_endpoint, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_endpoint_service.UpdateIndexEndpointRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if index_endpoint is not None: - request.index_endpoint = index_endpoint - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_index_endpoint, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index_endpoint.name", request.index_endpoint.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_index_endpoint(self, - request: Union[index_endpoint_service.DeleteIndexEndpointRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes an IndexEndpoint. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteIndexEndpointRequest, dict]): - The request object. Request message for - [IndexEndpointService.DeleteIndexEndpoint][google.cloud.aiplatform.v1.IndexEndpointService.DeleteIndexEndpoint]. - name (:class:`str`): - Required. 
The name of the IndexEndpoint resource to be - deleted. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_endpoint_service.DeleteIndexEndpointRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_index_endpoint, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def deploy_index(self, - request: Union[index_endpoint_service.DeployIndexRequest, dict] = None, - *, - index_endpoint: str = None, - deployed_index: gca_index_endpoint.DeployedIndex = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deploys an Index into this IndexEndpoint, creating a - DeployedIndex within it. - Only non-empty Indexes can be deployed. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeployIndexRequest, dict]): - The request object. Request message for - [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.DeployIndex]. - index_endpoint (:class:`str`): - Required. The name of the IndexEndpoint resource into - which to deploy an Index. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_index (:class:`google.cloud.aiplatform_v1.types.DeployedIndex`): - Required. The DeployedIndex to be - created within the IndexEndpoint. 
- - This corresponds to the ``deployed_index`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.DeployIndexResponse` - Response message for - [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.DeployIndex]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([index_endpoint, deployed_index]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_endpoint_service.DeployIndexRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if index_endpoint is not None: - request.index_endpoint = index_endpoint - if deployed_index is not None: - request.deployed_index = deployed_index - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.deploy_index, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index_endpoint", request.index_endpoint), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - index_endpoint_service.DeployIndexResponse, - metadata_type=index_endpoint_service.DeployIndexOperationMetadata, - ) - - # Done; return the response. - return response - - async def undeploy_index(self, - request: Union[index_endpoint_service.UndeployIndexRequest, dict] = None, - *, - index_endpoint: str = None, - deployed_index_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Undeploys an Index from an IndexEndpoint, removing a - DeployedIndex from it, and freeing all resources it's - using. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UndeployIndexRequest, dict]): - The request object. Request message for - [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.UndeployIndex]. - index_endpoint (:class:`str`): - Required. The name of the IndexEndpoint resource from - which to undeploy an Index. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_index_id (:class:`str`): - Required. The ID of the DeployedIndex - to be undeployed from the IndexEndpoint. - - This corresponds to the ``deployed_index_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.UndeployIndexResponse` - Response message for - [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.UndeployIndex]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([index_endpoint, deployed_index_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_endpoint_service.UndeployIndexRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if index_endpoint is not None: - request.index_endpoint = index_endpoint - if deployed_index_id is not None: - request.deployed_index_id = deployed_index_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.undeploy_index, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index_endpoint", request.index_endpoint), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - index_endpoint_service.UndeployIndexResponse, - metadata_type=index_endpoint_service.UndeployIndexOperationMetadata, - ) - - # Done; return the response. 
- return response - - async def mutate_deployed_index(self, - request: Union[index_endpoint_service.MutateDeployedIndexRequest, dict] = None, - *, - index_endpoint: str = None, - deployed_index: gca_index_endpoint.DeployedIndex = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Update an existing DeployedIndex under an - IndexEndpoint. - - Args: - request (Union[google.cloud.aiplatform_v1.types.MutateDeployedIndexRequest, dict]): - The request object. Request message for - [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex]. - index_endpoint (:class:`str`): - Required. The name of the IndexEndpoint resource into - which to deploy an Index. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_index (:class:`google.cloud.aiplatform_v1.types.DeployedIndex`): - Required. The DeployedIndex to be updated within the - IndexEndpoint. Currently, the updatable fields are - [DeployedIndex][automatic_resources] and - [DeployedIndex][dedicated_resources] - - This corresponds to the ``deployed_index`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. 
- - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.MutateDeployedIndexResponse` - Response message for - [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([index_endpoint, deployed_index]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_endpoint_service.MutateDeployedIndexRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if index_endpoint is not None: - request.index_endpoint = index_endpoint - if deployed_index is not None: - request.deployed_index = deployed_index - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.mutate_deployed_index, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index_endpoint", request.index_endpoint), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - index_endpoint_service.MutateDeployedIndexResponse, - metadata_type=index_endpoint_service.MutateDeployedIndexOperationMetadata, - ) - - # Done; return the response. 
- return response - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "IndexEndpointServiceAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py deleted file mode 100644 index 66825d79cf..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/client.py +++ /dev/null @@ -1,1132 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.index_endpoint_service import pagers -from google.cloud.aiplatform_v1.types import index_endpoint -from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint -from google.cloud.aiplatform_v1.types import index_endpoint_service -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import IndexEndpointServiceGrpcTransport -from .transports.grpc_asyncio import IndexEndpointServiceGrpcAsyncIOTransport - - -class IndexEndpointServiceClientMeta(type): - """Metaclass for the IndexEndpointService client. - - This provides class-level methods for building and retrieving - support objects (e.g. 
transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[IndexEndpointServiceTransport]] - _transport_registry["grpc"] = IndexEndpointServiceGrpcTransport - _transport_registry["grpc_asyncio"] = IndexEndpointServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[IndexEndpointServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class IndexEndpointServiceClient(metaclass=IndexEndpointServiceClientMeta): - """A service for managing Vertex AI's IndexEndpoints.""" - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
- ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - IndexEndpointServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - IndexEndpointServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> IndexEndpointServiceTransport: - """Returns the transport used by the client instance. - - Returns: - IndexEndpointServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def index_path(project: str,location: str,index: str,) -> str: - """Returns a fully-qualified index string.""" - return "projects/{project}/locations/{location}/indexes/{index}".format(project=project, location=location, index=index, ) - - @staticmethod - def parse_index_path(path: str) -> Dict[str,str]: - """Parses a index path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/indexes/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def index_endpoint_path(project: str,location: str,index_endpoint: str,) -> str: - """Returns a fully-qualified index_endpoint string.""" - return "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format(project=project, location=location, index_endpoint=index_endpoint, ) - - @staticmethod - def parse_index_endpoint_path(path: str) -> Dict[str,str]: - """Parses a index_endpoint path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/indexEndpoints/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def 
common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, IndexEndpointServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the index endpoint service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. 
- transport (Union[str, IndexEndpointServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. 
- if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, IndexEndpointServiceTransport): - # transport is a IndexEndpointServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def create_index_endpoint(self, - request: Union[index_endpoint_service.CreateIndexEndpointRequest, dict] = None, - *, - parent: str = None, - index_endpoint: gca_index_endpoint.IndexEndpoint = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates an IndexEndpoint. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateIndexEndpointRequest, dict]): - The request object. Request message for - [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1.IndexEndpointService.CreateIndexEndpoint]. - parent (str): - Required. The resource name of the Location to create - the IndexEndpoint in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - index_endpoint (google.cloud.aiplatform_v1.types.IndexEndpoint): - Required. The IndexEndpoint to - create. - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
- - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.IndexEndpoint` Indexes are deployed into it. An IndexEndpoint can have multiple - DeployedIndexes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, index_endpoint]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_endpoint_service.CreateIndexEndpointRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_endpoint_service.CreateIndexEndpointRequest): - request = index_endpoint_service.CreateIndexEndpointRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if index_endpoint is not None: - request.index_endpoint = index_endpoint - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_index_endpoint] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_index_endpoint.IndexEndpoint, - metadata_type=index_endpoint_service.CreateIndexEndpointOperationMetadata, - ) - - # Done; return the response. 
- return response - - def get_index_endpoint(self, - request: Union[index_endpoint_service.GetIndexEndpointRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> index_endpoint.IndexEndpoint: - r"""Gets an IndexEndpoint. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetIndexEndpointRequest, dict]): - The request object. Request message for - [IndexEndpointService.GetIndexEndpoint][google.cloud.aiplatform.v1.IndexEndpointService.GetIndexEndpoint] - name (str): - Required. The name of the IndexEndpoint resource. - Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.IndexEndpoint: - Indexes are deployed into it. An - IndexEndpoint can have multiple - DeployedIndexes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_endpoint_service.GetIndexEndpointRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, index_endpoint_service.GetIndexEndpointRequest): - request = index_endpoint_service.GetIndexEndpointRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_index_endpoint] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_index_endpoints(self, - request: Union[index_endpoint_service.ListIndexEndpointsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListIndexEndpointsPager: - r"""Lists IndexEndpoints in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListIndexEndpointsRequest, dict]): - The request object. Request message for - [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1.IndexEndpointService.ListIndexEndpoints]. - parent (str): - Required. The resource name of the Location from which - to list the IndexEndpoints. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1.services.index_endpoint_service.pagers.ListIndexEndpointsPager: - Response message for - [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1.IndexEndpointService.ListIndexEndpoints]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_endpoint_service.ListIndexEndpointsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_endpoint_service.ListIndexEndpointsRequest): - request = index_endpoint_service.ListIndexEndpointsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_index_endpoints] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. 
- response = pagers.ListIndexEndpointsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_index_endpoint(self, - request: Union[index_endpoint_service.UpdateIndexEndpointRequest, dict] = None, - *, - index_endpoint: gca_index_endpoint.IndexEndpoint = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_index_endpoint.IndexEndpoint: - r"""Updates an IndexEndpoint. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateIndexEndpointRequest, dict]): - The request object. Request message for - [IndexEndpointService.UpdateIndexEndpoint][google.cloud.aiplatform.v1.IndexEndpointService.UpdateIndexEndpoint]. - index_endpoint (google.cloud.aiplatform_v1.types.IndexEndpoint): - Required. The IndexEndpoint which - replaces the resource on the server. - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. See - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.IndexEndpoint: - Indexes are deployed into it. An - IndexEndpoint can have multiple - DeployedIndexes. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([index_endpoint, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_endpoint_service.UpdateIndexEndpointRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_endpoint_service.UpdateIndexEndpointRequest): - request = index_endpoint_service.UpdateIndexEndpointRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if index_endpoint is not None: - request.index_endpoint = index_endpoint - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_index_endpoint] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index_endpoint.name", request.index_endpoint.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_index_endpoint(self, - request: Union[index_endpoint_service.DeleteIndexEndpointRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes an IndexEndpoint. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteIndexEndpointRequest, dict]): - The request object. 
Request message for - [IndexEndpointService.DeleteIndexEndpoint][google.cloud.aiplatform.v1.IndexEndpointService.DeleteIndexEndpoint]. - name (str): - Required. The name of the IndexEndpoint resource to be - deleted. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_endpoint_service.DeleteIndexEndpointRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, index_endpoint_service.DeleteIndexEndpointRequest): - request = index_endpoint_service.DeleteIndexEndpointRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_index_endpoint] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def deploy_index(self, - request: Union[index_endpoint_service.DeployIndexRequest, dict] = None, - *, - index_endpoint: str = None, - deployed_index: gca_index_endpoint.DeployedIndex = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deploys an Index into this IndexEndpoint, creating a - DeployedIndex within it. - Only non-empty Indexes can be deployed. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeployIndexRequest, dict]): - The request object. Request message for - [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.DeployIndex]. - index_endpoint (str): - Required. The name of the IndexEndpoint resource into - which to deploy an Index. 
Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_index (google.cloud.aiplatform_v1.types.DeployedIndex): - Required. The DeployedIndex to be - created within the IndexEndpoint. - - This corresponds to the ``deployed_index`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.DeployIndexResponse` - Response message for - [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.DeployIndex]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([index_endpoint, deployed_index]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_endpoint_service.DeployIndexRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_endpoint_service.DeployIndexRequest): - request = index_endpoint_service.DeployIndexRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if index_endpoint is not None: - request.index_endpoint = index_endpoint - if deployed_index is not None: - request.deployed_index = deployed_index - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.deploy_index] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index_endpoint", request.index_endpoint), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - index_endpoint_service.DeployIndexResponse, - metadata_type=index_endpoint_service.DeployIndexOperationMetadata, - ) - - # Done; return the response. - return response - - def undeploy_index(self, - request: Union[index_endpoint_service.UndeployIndexRequest, dict] = None, - *, - index_endpoint: str = None, - deployed_index_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Undeploys an Index from an IndexEndpoint, removing a - DeployedIndex from it, and freeing all resources it's - using. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UndeployIndexRequest, dict]): - The request object. Request message for - [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.UndeployIndex]. - index_endpoint (str): - Required. The name of the IndexEndpoint resource from - which to undeploy an Index. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- deployed_index_id (str): - Required. The ID of the DeployedIndex - to be undeployed from the IndexEndpoint. - - This corresponds to the ``deployed_index_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.UndeployIndexResponse` - Response message for - [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.UndeployIndex]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([index_endpoint, deployed_index_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_endpoint_service.UndeployIndexRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_endpoint_service.UndeployIndexRequest): - request = index_endpoint_service.UndeployIndexRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if index_endpoint is not None: - request.index_endpoint = index_endpoint - if deployed_index_id is not None: - request.deployed_index_id = deployed_index_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.undeploy_index] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index_endpoint", request.index_endpoint), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - index_endpoint_service.UndeployIndexResponse, - metadata_type=index_endpoint_service.UndeployIndexOperationMetadata, - ) - - # Done; return the response. - return response - - def mutate_deployed_index(self, - request: Union[index_endpoint_service.MutateDeployedIndexRequest, dict] = None, - *, - index_endpoint: str = None, - deployed_index: gca_index_endpoint.DeployedIndex = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Update an existing DeployedIndex under an - IndexEndpoint. - - Args: - request (Union[google.cloud.aiplatform_v1.types.MutateDeployedIndexRequest, dict]): - The request object. Request message for - [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex]. - index_endpoint (str): - Required. The name of the IndexEndpoint resource into - which to deploy an Index. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_index (google.cloud.aiplatform_v1.types.DeployedIndex): - Required. The DeployedIndex to be updated within the - IndexEndpoint. 
Currently, the updatable fields are - [DeployedIndex][automatic_resources] and - [DeployedIndex][dedicated_resources] - - This corresponds to the ``deployed_index`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.MutateDeployedIndexResponse` - Response message for - [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([index_endpoint, deployed_index]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_endpoint_service.MutateDeployedIndexRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_endpoint_service.MutateDeployedIndexRequest): - request = index_endpoint_service.MutateDeployedIndexRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if index_endpoint is not None: - request.index_endpoint = index_endpoint - if deployed_index is not None: - request.deployed_index = deployed_index - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.mutate_deployed_index] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index_endpoint", request.index_endpoint), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - index_endpoint_service.MutateDeployedIndexResponse, - metadata_type=index_endpoint_service.MutateDeployedIndexOperationMetadata, - ) - - # Done; return the response. - return response - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! 
- """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "IndexEndpointServiceClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/pagers.py deleted file mode 100644 index 2ab978707e..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/pagers.py +++ /dev/null @@ -1,141 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.aiplatform_v1.types import index_endpoint -from google.cloud.aiplatform_v1.types import index_endpoint_service - - -class ListIndexEndpointsPager: - """A pager for iterating through ``list_index_endpoints`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListIndexEndpointsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``index_endpoints`` field. 
- - If there are more pages, the ``__iter__`` method will make additional - ``ListIndexEndpoints`` requests and continue to iterate - through the ``index_endpoints`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListIndexEndpointsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., index_endpoint_service.ListIndexEndpointsResponse], - request: index_endpoint_service.ListIndexEndpointsRequest, - response: index_endpoint_service.ListIndexEndpointsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListIndexEndpointsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListIndexEndpointsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = index_endpoint_service.ListIndexEndpointsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[index_endpoint_service.ListIndexEndpointsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[index_endpoint.IndexEndpoint]: - for page in self.pages: - yield from page.index_endpoints - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListIndexEndpointsAsyncPager: - """A pager for iterating through ``list_index_endpoints`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListIndexEndpointsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``index_endpoints`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListIndexEndpoints`` requests and continue to iterate - through the ``index_endpoints`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListIndexEndpointsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[index_endpoint_service.ListIndexEndpointsResponse]], - request: index_endpoint_service.ListIndexEndpointsRequest, - response: index_endpoint_service.ListIndexEndpointsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. 
- request (google.cloud.aiplatform_v1.types.ListIndexEndpointsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListIndexEndpointsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = index_endpoint_service.ListIndexEndpointsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[index_endpoint_service.ListIndexEndpointsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[index_endpoint.IndexEndpoint]: - async def async_generator(): - async for page in self.pages: - for response in page.index_endpoints: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/__init__.py deleted file mode 100644 index 42d3519efd..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import IndexEndpointServiceTransport -from .grpc import IndexEndpointServiceGrpcTransport -from .grpc_asyncio import IndexEndpointServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[IndexEndpointServiceTransport]] -_transport_registry['grpc'] = IndexEndpointServiceGrpcTransport -_transport_registry['grpc_asyncio'] = IndexEndpointServiceGrpcAsyncIOTransport - -__all__ = ( - 'IndexEndpointServiceTransport', - 'IndexEndpointServiceGrpcTransport', - 'IndexEndpointServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py deleted file mode 100644 index 9ff09271d0..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/base.py +++ /dev/null @@ -1,253 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1.types import index_endpoint -from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint -from google.cloud.aiplatform_v1.types import index_endpoint_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class IndexEndpointServiceTransport(abc.ABC): - """Abstract transport class for IndexEndpointService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. 
- if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.create_index_endpoint: gapic_v1.method.wrap_method( - self.create_index_endpoint, - default_timeout=None, - client_info=client_info, - ), - self.get_index_endpoint: gapic_v1.method.wrap_method( - self.get_index_endpoint, - default_timeout=None, - client_info=client_info, - ), - self.list_index_endpoints: gapic_v1.method.wrap_method( - self.list_index_endpoints, - default_timeout=None, - client_info=client_info, - ), - self.update_index_endpoint: gapic_v1.method.wrap_method( - self.update_index_endpoint, - default_timeout=None, - client_info=client_info, - ), - self.delete_index_endpoint: gapic_v1.method.wrap_method( - self.delete_index_endpoint, - default_timeout=None, - client_info=client_info, - ), - self.deploy_index: gapic_v1.method.wrap_method( - self.deploy_index, - default_timeout=None, - client_info=client_info, - ), - self.undeploy_index: gapic_v1.method.wrap_method( - self.undeploy_index, - default_timeout=None, - client_info=client_info, - ), - self.mutate_deployed_index: gapic_v1.method.wrap_method( - self.mutate_deployed_index, - default_timeout=None, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! 
- """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_index_endpoint(self) -> Callable[ - [index_endpoint_service.CreateIndexEndpointRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_index_endpoint(self) -> Callable[ - [index_endpoint_service.GetIndexEndpointRequest], - Union[ - index_endpoint.IndexEndpoint, - Awaitable[index_endpoint.IndexEndpoint] - ]]: - raise NotImplementedError() - - @property - def list_index_endpoints(self) -> Callable[ - [index_endpoint_service.ListIndexEndpointsRequest], - Union[ - index_endpoint_service.ListIndexEndpointsResponse, - Awaitable[index_endpoint_service.ListIndexEndpointsResponse] - ]]: - raise NotImplementedError() - - @property - def update_index_endpoint(self) -> Callable[ - [index_endpoint_service.UpdateIndexEndpointRequest], - Union[ - gca_index_endpoint.IndexEndpoint, - Awaitable[gca_index_endpoint.IndexEndpoint] - ]]: - raise NotImplementedError() - - @property - def delete_index_endpoint(self) -> Callable[ - [index_endpoint_service.DeleteIndexEndpointRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def deploy_index(self) -> Callable[ - [index_endpoint_service.DeployIndexRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def undeploy_index(self) -> Callable[ - [index_endpoint_service.UndeployIndexRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def mutate_deployed_index(self) -> Callable[ - [index_endpoint_service.MutateDeployedIndexRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - 
]]: - raise NotImplementedError() - - -__all__ = ( - 'IndexEndpointServiceTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py deleted file mode 100644 index e77a43ae3d..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc.py +++ /dev/null @@ -1,462 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1.types import index_endpoint -from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint -from google.cloud.aiplatform_v1.types import index_endpoint_service -from google.longrunning import operations_pb2 # type: ignore -from .base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO - - -class IndexEndpointServiceGrpcTransport(IndexEndpointServiceTransport): - """gRPC backend transport for IndexEndpointService. 
- - A service for managing Vertex AI's IndexEndpoints. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
- If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. 
This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. 
- """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_index_endpoint(self) -> Callable[ - [index_endpoint_service.CreateIndexEndpointRequest], - operations_pb2.Operation]: - r"""Return a callable for the create index endpoint method over gRPC. - - Creates an IndexEndpoint. - - Returns: - Callable[[~.CreateIndexEndpointRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_index_endpoint' not in self._stubs: - self._stubs['create_index_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.IndexEndpointService/CreateIndexEndpoint', - request_serializer=index_endpoint_service.CreateIndexEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_index_endpoint'] - - @property - def get_index_endpoint(self) -> Callable[ - [index_endpoint_service.GetIndexEndpointRequest], - index_endpoint.IndexEndpoint]: - r"""Return a callable for the get index endpoint method over gRPC. - - Gets an IndexEndpoint. - - Returns: - Callable[[~.GetIndexEndpointRequest], - ~.IndexEndpoint]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_index_endpoint' not in self._stubs: - self._stubs['get_index_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.IndexEndpointService/GetIndexEndpoint', - request_serializer=index_endpoint_service.GetIndexEndpointRequest.serialize, - response_deserializer=index_endpoint.IndexEndpoint.deserialize, - ) - return self._stubs['get_index_endpoint'] - - @property - def list_index_endpoints(self) -> Callable[ - [index_endpoint_service.ListIndexEndpointsRequest], - index_endpoint_service.ListIndexEndpointsResponse]: - r"""Return a callable for the list index endpoints method over gRPC. - - Lists IndexEndpoints in a Location. - - Returns: - Callable[[~.ListIndexEndpointsRequest], - ~.ListIndexEndpointsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_index_endpoints' not in self._stubs: - self._stubs['list_index_endpoints'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.IndexEndpointService/ListIndexEndpoints', - request_serializer=index_endpoint_service.ListIndexEndpointsRequest.serialize, - response_deserializer=index_endpoint_service.ListIndexEndpointsResponse.deserialize, - ) - return self._stubs['list_index_endpoints'] - - @property - def update_index_endpoint(self) -> Callable[ - [index_endpoint_service.UpdateIndexEndpointRequest], - gca_index_endpoint.IndexEndpoint]: - r"""Return a callable for the update index endpoint method over gRPC. - - Updates an IndexEndpoint. - - Returns: - Callable[[~.UpdateIndexEndpointRequest], - ~.IndexEndpoint]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_index_endpoint' not in self._stubs: - self._stubs['update_index_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.IndexEndpointService/UpdateIndexEndpoint', - request_serializer=index_endpoint_service.UpdateIndexEndpointRequest.serialize, - response_deserializer=gca_index_endpoint.IndexEndpoint.deserialize, - ) - return self._stubs['update_index_endpoint'] - - @property - def delete_index_endpoint(self) -> Callable[ - [index_endpoint_service.DeleteIndexEndpointRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete index endpoint method over gRPC. - - Deletes an IndexEndpoint. - - Returns: - Callable[[~.DeleteIndexEndpointRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_index_endpoint' not in self._stubs: - self._stubs['delete_index_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.IndexEndpointService/DeleteIndexEndpoint', - request_serializer=index_endpoint_service.DeleteIndexEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_index_endpoint'] - - @property - def deploy_index(self) -> Callable[ - [index_endpoint_service.DeployIndexRequest], - operations_pb2.Operation]: - r"""Return a callable for the deploy index method over gRPC. - - Deploys an Index into this IndexEndpoint, creating a - DeployedIndex within it. - Only non-empty Indexes can be deployed. 
- - Returns: - Callable[[~.DeployIndexRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'deploy_index' not in self._stubs: - self._stubs['deploy_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.IndexEndpointService/DeployIndex', - request_serializer=index_endpoint_service.DeployIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['deploy_index'] - - @property - def undeploy_index(self) -> Callable[ - [index_endpoint_service.UndeployIndexRequest], - operations_pb2.Operation]: - r"""Return a callable for the undeploy index method over gRPC. - - Undeploys an Index from an IndexEndpoint, removing a - DeployedIndex from it, and freeing all resources it's - using. - - Returns: - Callable[[~.UndeployIndexRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'undeploy_index' not in self._stubs: - self._stubs['undeploy_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.IndexEndpointService/UndeployIndex', - request_serializer=index_endpoint_service.UndeployIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['undeploy_index'] - - @property - def mutate_deployed_index(self) -> Callable[ - [index_endpoint_service.MutateDeployedIndexRequest], - operations_pb2.Operation]: - r"""Return a callable for the mutate deployed index method over gRPC. - - Update an existing DeployedIndex under an - IndexEndpoint. 
- - Returns: - Callable[[~.MutateDeployedIndexRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'mutate_deployed_index' not in self._stubs: - self._stubs['mutate_deployed_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.IndexEndpointService/MutateDeployedIndex', - request_serializer=index_endpoint_service.MutateDeployedIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['mutate_deployed_index'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'IndexEndpointServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py deleted file mode 100644 index 0742560d91..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,466 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1.types import index_endpoint -from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint -from google.cloud.aiplatform_v1.types import index_endpoint_service -from google.longrunning import operations_pb2 # type: ignore -from .base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import IndexEndpointServiceGrpcTransport - - -class IndexEndpointServiceGrpcAsyncIOTransport(IndexEndpointServiceTransport): - """gRPC AsyncIO backend transport for IndexEndpointService. - - A service for managing Vertex AI's IndexEndpoints. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. 
If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. 
- This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. 
- - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. 
- if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_index_endpoint(self) -> Callable[ - [index_endpoint_service.CreateIndexEndpointRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create index endpoint method over gRPC. - - Creates an IndexEndpoint. - - Returns: - Callable[[~.CreateIndexEndpointRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_index_endpoint' not in self._stubs: - self._stubs['create_index_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.IndexEndpointService/CreateIndexEndpoint', - request_serializer=index_endpoint_service.CreateIndexEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_index_endpoint'] - - @property - def get_index_endpoint(self) -> Callable[ - [index_endpoint_service.GetIndexEndpointRequest], - Awaitable[index_endpoint.IndexEndpoint]]: - r"""Return a callable for the get index endpoint method over gRPC. - - Gets an IndexEndpoint. - - Returns: - Callable[[~.GetIndexEndpointRequest], - Awaitable[~.IndexEndpoint]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_index_endpoint' not in self._stubs: - self._stubs['get_index_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.IndexEndpointService/GetIndexEndpoint', - request_serializer=index_endpoint_service.GetIndexEndpointRequest.serialize, - response_deserializer=index_endpoint.IndexEndpoint.deserialize, - ) - return self._stubs['get_index_endpoint'] - - @property - def list_index_endpoints(self) -> Callable[ - [index_endpoint_service.ListIndexEndpointsRequest], - Awaitable[index_endpoint_service.ListIndexEndpointsResponse]]: - r"""Return a callable for the list index endpoints method over gRPC. - - Lists IndexEndpoints in a Location. - - Returns: - Callable[[~.ListIndexEndpointsRequest], - Awaitable[~.ListIndexEndpointsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_index_endpoints' not in self._stubs: - self._stubs['list_index_endpoints'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.IndexEndpointService/ListIndexEndpoints', - request_serializer=index_endpoint_service.ListIndexEndpointsRequest.serialize, - response_deserializer=index_endpoint_service.ListIndexEndpointsResponse.deserialize, - ) - return self._stubs['list_index_endpoints'] - - @property - def update_index_endpoint(self) -> Callable[ - [index_endpoint_service.UpdateIndexEndpointRequest], - Awaitable[gca_index_endpoint.IndexEndpoint]]: - r"""Return a callable for the update index endpoint method over gRPC. - - Updates an IndexEndpoint. - - Returns: - Callable[[~.UpdateIndexEndpointRequest], - Awaitable[~.IndexEndpoint]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_index_endpoint' not in self._stubs: - self._stubs['update_index_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.IndexEndpointService/UpdateIndexEndpoint', - request_serializer=index_endpoint_service.UpdateIndexEndpointRequest.serialize, - response_deserializer=gca_index_endpoint.IndexEndpoint.deserialize, - ) - return self._stubs['update_index_endpoint'] - - @property - def delete_index_endpoint(self) -> Callable[ - [index_endpoint_service.DeleteIndexEndpointRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete index endpoint method over gRPC. - - Deletes an IndexEndpoint. - - Returns: - Callable[[~.DeleteIndexEndpointRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_index_endpoint' not in self._stubs: - self._stubs['delete_index_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.IndexEndpointService/DeleteIndexEndpoint', - request_serializer=index_endpoint_service.DeleteIndexEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_index_endpoint'] - - @property - def deploy_index(self) -> Callable[ - [index_endpoint_service.DeployIndexRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the deploy index method over gRPC. - - Deploys an Index into this IndexEndpoint, creating a - DeployedIndex within it. - Only non-empty Indexes can be deployed. - - Returns: - Callable[[~.DeployIndexRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'deploy_index' not in self._stubs: - self._stubs['deploy_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.IndexEndpointService/DeployIndex', - request_serializer=index_endpoint_service.DeployIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['deploy_index'] - - @property - def undeploy_index(self) -> Callable[ - [index_endpoint_service.UndeployIndexRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the undeploy index method over gRPC. - - Undeploys an Index from an IndexEndpoint, removing a - DeployedIndex from it, and freeing all resources it's - using. - - Returns: - Callable[[~.UndeployIndexRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'undeploy_index' not in self._stubs: - self._stubs['undeploy_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.IndexEndpointService/UndeployIndex', - request_serializer=index_endpoint_service.UndeployIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['undeploy_index'] - - @property - def mutate_deployed_index(self) -> Callable[ - [index_endpoint_service.MutateDeployedIndexRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the mutate deployed index method over gRPC. - - Update an existing DeployedIndex under an - IndexEndpoint. 
- - Returns: - Callable[[~.MutateDeployedIndexRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'mutate_deployed_index' not in self._stubs: - self._stubs['mutate_deployed_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.IndexEndpointService/MutateDeployedIndex', - request_serializer=index_endpoint_service.MutateDeployedIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['mutate_deployed_index'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'IndexEndpointServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/__init__.py deleted file mode 100644 index d2a09db9f1..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import IndexServiceClient -from .async_client import IndexServiceAsyncClient - -__all__ = ( - 'IndexServiceClient', - 'IndexServiceAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/async_client.py deleted file mode 100644 index 182e4dbf5d..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/async_client.py +++ /dev/null @@ -1,640 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.index_service import pagers -from google.cloud.aiplatform_v1.types import deployed_index_ref -from google.cloud.aiplatform_v1.types import index -from google.cloud.aiplatform_v1.types import index as gca_index -from google.cloud.aiplatform_v1.types import index_service -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import IndexServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import IndexServiceGrpcAsyncIOTransport -from .client import IndexServiceClient - - -class IndexServiceAsyncClient: - """A service for creating and managing Vertex AI's Index - resources. 
- """ - - _client: IndexServiceClient - - DEFAULT_ENDPOINT = IndexServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = IndexServiceClient.DEFAULT_MTLS_ENDPOINT - - index_path = staticmethod(IndexServiceClient.index_path) - parse_index_path = staticmethod(IndexServiceClient.parse_index_path) - index_endpoint_path = staticmethod(IndexServiceClient.index_endpoint_path) - parse_index_endpoint_path = staticmethod(IndexServiceClient.parse_index_endpoint_path) - common_billing_account_path = staticmethod(IndexServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(IndexServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(IndexServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(IndexServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(IndexServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(IndexServiceClient.parse_common_organization_path) - common_project_path = staticmethod(IndexServiceClient.common_project_path) - parse_common_project_path = staticmethod(IndexServiceClient.parse_common_project_path) - common_location_path = staticmethod(IndexServiceClient.common_location_path) - parse_common_location_path = staticmethod(IndexServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - IndexServiceAsyncClient: The constructed client. 
- """ - return IndexServiceClient.from_service_account_info.__func__(IndexServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - IndexServiceAsyncClient: The constructed client. - """ - return IndexServiceClient.from_service_account_file.__func__(IndexServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> IndexServiceTransport: - """Returns the transport used by the client instance. - - Returns: - IndexServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(IndexServiceClient).get_transport_class, type(IndexServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, IndexServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the index service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.IndexServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. 
- (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = IndexServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_index(self, - request: Union[index_service.CreateIndexRequest, dict] = None, - *, - parent: str = None, - index: gca_index.Index = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates an Index. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateIndexRequest, dict]): - The request object. Request message for - [IndexService.CreateIndex][google.cloud.aiplatform.v1.IndexService.CreateIndex]. - parent (:class:`str`): - Required. The resource name of the Location to create - the Index in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- index (:class:`google.cloud.aiplatform_v1.types.Index`): - Required. The Index to create. - This corresponds to the ``index`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Index` A representation of a collection of database items organized in a way that - allows for approximate nearest neighbor (a.k.a ANN) - algorithms search. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, index]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_service.CreateIndexRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if index is not None: - request.index = index - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_index, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_index.Index, - metadata_type=index_service.CreateIndexOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_index(self, - request: Union[index_service.GetIndexRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> index.Index: - r"""Gets an Index. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetIndexRequest, dict]): - The request object. Request message for - [IndexService.GetIndex][google.cloud.aiplatform.v1.IndexService.GetIndex] - name (:class:`str`): - Required. The name of the Index resource. Format: - ``projects/{project}/locations/{location}/indexes/{index}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Index: - A representation of a collection of - database items organized in a way that - allows for approximate nearest neighbor - (a.k.a ANN) algorithms search. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_service.GetIndexRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_index, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_indexes(self, - request: Union[index_service.ListIndexesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListIndexesAsyncPager: - r"""Lists Indexes in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListIndexesRequest, dict]): - The request object. Request message for - [IndexService.ListIndexes][google.cloud.aiplatform.v1.IndexService.ListIndexes]. - parent (:class:`str`): - Required. The resource name of the Location from which - to list the Indexes. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.index_service.pagers.ListIndexesAsyncPager: - Response message for - [IndexService.ListIndexes][google.cloud.aiplatform.v1.IndexService.ListIndexes]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_service.ListIndexesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_indexes, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListIndexesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def update_index(self, - request: Union[index_service.UpdateIndexRequest, dict] = None, - *, - index: gca_index.Index = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Updates an Index. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateIndexRequest, dict]): - The request object. Request message for - [IndexService.UpdateIndex][google.cloud.aiplatform.v1.IndexService.UpdateIndex]. - index (:class:`google.cloud.aiplatform_v1.types.Index`): - Required. The Index which updates the - resource on the server. - - This corresponds to the ``index`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - The update mask applies to the resource. For the - ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Index` A representation of a collection of database items organized in a way that - allows for approximate nearest neighbor (a.k.a ANN) - algorithms search. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([index, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_service.UpdateIndexRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if index is not None: - request.index = index - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_index, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index.name", request.index.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_index.Index, - metadata_type=index_service.UpdateIndexOperationMetadata, - ) - - # Done; return the response. - return response - - async def delete_index(self, - request: Union[index_service.DeleteIndexRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes an Index. An Index can only be deleted when all its - [DeployedIndexes][google.cloud.aiplatform.v1.Index.deployed_indexes] - had been undeployed. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteIndexRequest, dict]): - The request object. Request message for - [IndexService.DeleteIndex][google.cloud.aiplatform.v1.IndexService.DeleteIndex]. 
- name (:class:`str`): - Required. The name of the Index resource to be deleted. - Format: - ``projects/{project}/locations/{location}/indexes/{index}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_service.DeleteIndexRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_index, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "IndexServiceAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/client.py deleted file mode 100644 index f8201dd7f3..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/client.py +++ /dev/null @@ -1,847 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.index_service import pagers -from google.cloud.aiplatform_v1.types import deployed_index_ref -from google.cloud.aiplatform_v1.types import index -from google.cloud.aiplatform_v1.types import index as gca_index -from google.cloud.aiplatform_v1.types import index_service -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf 
import timestamp_pb2 # type: ignore -from .transports.base import IndexServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import IndexServiceGrpcTransport -from .transports.grpc_asyncio import IndexServiceGrpcAsyncIOTransport - - -class IndexServiceClientMeta(type): - """Metaclass for the IndexService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[IndexServiceTransport]] - _transport_registry["grpc"] = IndexServiceGrpcTransport - _transport_registry["grpc_asyncio"] = IndexServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[IndexServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class IndexServiceClient(metaclass=IndexServiceClientMeta): - """A service for creating and managing Vertex AI's Index - resources. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
- ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - IndexServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - IndexServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> IndexServiceTransport: - """Returns the transport used by the client instance. - - Returns: - IndexServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def index_path(project: str,location: str,index: str,) -> str: - """Returns a fully-qualified index string.""" - return "projects/{project}/locations/{location}/indexes/{index}".format(project=project, location=location, index=index, ) - - @staticmethod - def parse_index_path(path: str) -> Dict[str,str]: - """Parses a index path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/indexes/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def index_endpoint_path(project: str,location: str,index_endpoint: str,) -> str: - """Returns a fully-qualified index_endpoint string.""" - return "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format(project=project, location=location, index_endpoint=index_endpoint, ) - - @staticmethod - def parse_index_endpoint_path(path: str) -> Dict[str,str]: - """Parses a index_endpoint path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/indexEndpoints/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def 
common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, IndexServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the index service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, IndexServiceTransport]): The - transport to use. 
If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. 
- if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, IndexServiceTransport): - # transport is a IndexServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def create_index(self, - request: Union[index_service.CreateIndexRequest, dict] = None, - *, - parent: str = None, - index: gca_index.Index = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates an Index. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateIndexRequest, dict]): - The request object. Request message for - [IndexService.CreateIndex][google.cloud.aiplatform.v1.IndexService.CreateIndex]. - parent (str): - Required. The resource name of the Location to create - the Index in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - index (google.cloud.aiplatform_v1.types.Index): - Required. The Index to create. - This corresponds to the ``index`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
- - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Index` A representation of a collection of database items organized in a way that - allows for approximate nearest neighbor (a.k.a ANN) - algorithms search. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, index]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_service.CreateIndexRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_service.CreateIndexRequest): - request = index_service.CreateIndexRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if index is not None: - request.index = index - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_index] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_index.Index, - metadata_type=index_service.CreateIndexOperationMetadata, - ) - - # Done; return the response. 
- return response - - def get_index(self, - request: Union[index_service.GetIndexRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> index.Index: - r"""Gets an Index. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetIndexRequest, dict]): - The request object. Request message for - [IndexService.GetIndex][google.cloud.aiplatform.v1.IndexService.GetIndex] - name (str): - Required. The name of the Index resource. Format: - ``projects/{project}/locations/{location}/indexes/{index}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Index: - A representation of a collection of - database items organized in a way that - allows for approximate nearest neighbor - (a.k.a ANN) algorithms search. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_service.GetIndexRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, index_service.GetIndexRequest): - request = index_service.GetIndexRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_index] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_indexes(self, - request: Union[index_service.ListIndexesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListIndexesPager: - r"""Lists Indexes in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListIndexesRequest, dict]): - The request object. Request message for - [IndexService.ListIndexes][google.cloud.aiplatform.v1.IndexService.ListIndexes]. - parent (str): - Required. The resource name of the Location from which - to list the Indexes. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1.services.index_service.pagers.ListIndexesPager: - Response message for - [IndexService.ListIndexes][google.cloud.aiplatform.v1.IndexService.ListIndexes]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_service.ListIndexesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_service.ListIndexesRequest): - request = index_service.ListIndexesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_indexes] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListIndexesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def update_index(self, - request: Union[index_service.UpdateIndexRequest, dict] = None, - *, - index: gca_index.Index = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Updates an Index. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateIndexRequest, dict]): - The request object. Request message for - [IndexService.UpdateIndex][google.cloud.aiplatform.v1.IndexService.UpdateIndex]. - index (google.cloud.aiplatform_v1.types.Index): - Required. The Index which updates the - resource on the server. - - This corresponds to the ``index`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - The update mask applies to the resource. For the - ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Index` A representation of a collection of database items organized in a way that - allows for approximate nearest neighbor (a.k.a ANN) - algorithms search. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([index, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_service.UpdateIndexRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_service.UpdateIndexRequest): - request = index_service.UpdateIndexRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if index is not None: - request.index = index - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_index] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index.name", request.index.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_index.Index, - metadata_type=index_service.UpdateIndexOperationMetadata, - ) - - # Done; return the response. - return response - - def delete_index(self, - request: Union[index_service.DeleteIndexRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes an Index. An Index can only be deleted when all its - [DeployedIndexes][google.cloud.aiplatform.v1.Index.deployed_indexes] - had been undeployed. 
- - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteIndexRequest, dict]): - The request object. Request message for - [IndexService.DeleteIndex][google.cloud.aiplatform.v1.IndexService.DeleteIndex]. - name (str): - Required. The name of the Index resource to be deleted. - Format: - ``projects/{project}/locations/{location}/indexes/{index}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_service.DeleteIndexRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, index_service.DeleteIndexRequest): - request = index_service.DeleteIndexRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_index] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! 
- """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "IndexServiceClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/pagers.py deleted file mode 100644 index af84e71f7d..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/pagers.py +++ /dev/null @@ -1,141 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.aiplatform_v1.types import index -from google.cloud.aiplatform_v1.types import index_service - - -class ListIndexesPager: - """A pager for iterating through ``list_indexes`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListIndexesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``indexes`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListIndexes`` requests and continue to iterate - through the ``indexes`` field on the - corresponding responses. 
- - All the usual :class:`google.cloud.aiplatform_v1.types.ListIndexesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., index_service.ListIndexesResponse], - request: index_service.ListIndexesRequest, - response: index_service.ListIndexesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListIndexesRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListIndexesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = index_service.ListIndexesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[index_service.ListIndexesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[index.Index]: - for page in self.pages: - yield from page.indexes - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListIndexesAsyncPager: - """A pager for iterating through ``list_indexes`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListIndexesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``indexes`` field. 
- - If there are more pages, the ``__aiter__`` method will make additional - ``ListIndexes`` requests and continue to iterate - through the ``indexes`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListIndexesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[index_service.ListIndexesResponse]], - request: index_service.ListIndexesRequest, - response: index_service.ListIndexesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListIndexesRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListIndexesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = index_service.ListIndexesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[index_service.ListIndexesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[index.Index]: - async def async_generator(): - async for page in self.pages: - for response in page.indexes: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/__init__.py deleted file mode 100644 index 2f263f2fb8..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -from typing import Dict, Type - -from .base import IndexServiceTransport -from .grpc import IndexServiceGrpcTransport -from .grpc_asyncio import IndexServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[IndexServiceTransport]] -_transport_registry['grpc'] = IndexServiceGrpcTransport -_transport_registry['grpc_asyncio'] = IndexServiceGrpcAsyncIOTransport - -__all__ = ( - 'IndexServiceTransport', - 'IndexServiceGrpcTransport', - 'IndexServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/base.py deleted file mode 100644 index eded18ea24..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/base.py +++ /dev/null @@ -1,210 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1.types import index -from google.cloud.aiplatform_v1.types import index_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class IndexServiceTransport(abc.ABC): - """Abstract transport class for IndexService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
- self._wrapped_methods = { - self.create_index: gapic_v1.method.wrap_method( - self.create_index, - default_timeout=None, - client_info=client_info, - ), - self.get_index: gapic_v1.method.wrap_method( - self.get_index, - default_timeout=None, - client_info=client_info, - ), - self.list_indexes: gapic_v1.method.wrap_method( - self.list_indexes, - default_timeout=None, - client_info=client_info, - ), - self.update_index: gapic_v1.method.wrap_method( - self.update_index, - default_timeout=None, - client_info=client_info, - ), - self.delete_index: gapic_v1.method.wrap_method( - self.delete_index, - default_timeout=None, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! - """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_index(self) -> Callable[ - [index_service.CreateIndexRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_index(self) -> Callable[ - [index_service.GetIndexRequest], - Union[ - index.Index, - Awaitable[index.Index] - ]]: - raise NotImplementedError() - - @property - def list_indexes(self) -> Callable[ - [index_service.ListIndexesRequest], - Union[ - index_service.ListIndexesResponse, - Awaitable[index_service.ListIndexesResponse] - ]]: - raise NotImplementedError() - - @property - def update_index(self) -> Callable[ - [index_service.UpdateIndexRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def delete_index(self) -> Callable[ - [index_service.DeleteIndexRequest], - Union[ - operations_pb2.Operation, - 
Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'IndexServiceTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/grpc.py deleted file mode 100644 index 85993584bb..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/grpc.py +++ /dev/null @@ -1,381 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1.types import index -from google.cloud.aiplatform_v1.types import index_service -from google.longrunning import operations_pb2 # type: ignore -from .base import IndexServiceTransport, DEFAULT_CLIENT_INFO - - -class IndexServiceGrpcTransport(IndexServiceTransport): - """gRPC backend transport for IndexService. - - A service for creating and managing Vertex AI's Index - resources. 
- - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
- If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. 
This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. 
- """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_index(self) -> Callable[ - [index_service.CreateIndexRequest], - operations_pb2.Operation]: - r"""Return a callable for the create index method over gRPC. - - Creates an Index. - - Returns: - Callable[[~.CreateIndexRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_index' not in self._stubs: - self._stubs['create_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.IndexService/CreateIndex', - request_serializer=index_service.CreateIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_index'] - - @property - def get_index(self) -> Callable[ - [index_service.GetIndexRequest], - index.Index]: - r"""Return a callable for the get index method over gRPC. - - Gets an Index. - - Returns: - Callable[[~.GetIndexRequest], - ~.Index]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_index' not in self._stubs: - self._stubs['get_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.IndexService/GetIndex', - request_serializer=index_service.GetIndexRequest.serialize, - response_deserializer=index.Index.deserialize, - ) - return self._stubs['get_index'] - - @property - def list_indexes(self) -> Callable[ - [index_service.ListIndexesRequest], - index_service.ListIndexesResponse]: - r"""Return a callable for the list indexes method over gRPC. - - Lists Indexes in a Location. - - Returns: - Callable[[~.ListIndexesRequest], - ~.ListIndexesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_indexes' not in self._stubs: - self._stubs['list_indexes'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.IndexService/ListIndexes', - request_serializer=index_service.ListIndexesRequest.serialize, - response_deserializer=index_service.ListIndexesResponse.deserialize, - ) - return self._stubs['list_indexes'] - - @property - def update_index(self) -> Callable[ - [index_service.UpdateIndexRequest], - operations_pb2.Operation]: - r"""Return a callable for the update index method over gRPC. - - Updates an Index. - - Returns: - Callable[[~.UpdateIndexRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_index' not in self._stubs: - self._stubs['update_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.IndexService/UpdateIndex', - request_serializer=index_service.UpdateIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_index'] - - @property - def delete_index(self) -> Callable[ - [index_service.DeleteIndexRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete index method over gRPC. - - Deletes an Index. An Index can only be deleted when all its - [DeployedIndexes][google.cloud.aiplatform.v1.Index.deployed_indexes] - had been undeployed. - - Returns: - Callable[[~.DeleteIndexRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_index' not in self._stubs: - self._stubs['delete_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.IndexService/DeleteIndex', - request_serializer=index_service.DeleteIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_index'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'IndexServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/grpc_asyncio.py deleted file mode 100644 index ad866f79db..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/index_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,385 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1.types import index -from google.cloud.aiplatform_v1.types import index_service -from google.longrunning import operations_pb2 # type: ignore -from .base import IndexServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import IndexServiceGrpcTransport - - -class IndexServiceGrpcAsyncIOTransport(IndexServiceTransport): - """gRPC AsyncIO backend transport for IndexService. - - A service for creating and managing Vertex AI's Index - resources. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. 
- google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - 
("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_index(self) -> Callable[ - [index_service.CreateIndexRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create index method over gRPC. - - Creates an Index. - - Returns: - Callable[[~.CreateIndexRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_index' not in self._stubs: - self._stubs['create_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.IndexService/CreateIndex', - request_serializer=index_service.CreateIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_index'] - - @property - def get_index(self) -> Callable[ - [index_service.GetIndexRequest], - Awaitable[index.Index]]: - r"""Return a callable for the get index method over gRPC. - - Gets an Index. - - Returns: - Callable[[~.GetIndexRequest], - Awaitable[~.Index]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_index' not in self._stubs: - self._stubs['get_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.IndexService/GetIndex', - request_serializer=index_service.GetIndexRequest.serialize, - response_deserializer=index.Index.deserialize, - ) - return self._stubs['get_index'] - - @property - def list_indexes(self) -> Callable[ - [index_service.ListIndexesRequest], - Awaitable[index_service.ListIndexesResponse]]: - r"""Return a callable for the list indexes method over gRPC. - - Lists Indexes in a Location. - - Returns: - Callable[[~.ListIndexesRequest], - Awaitable[~.ListIndexesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_indexes' not in self._stubs: - self._stubs['list_indexes'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.IndexService/ListIndexes', - request_serializer=index_service.ListIndexesRequest.serialize, - response_deserializer=index_service.ListIndexesResponse.deserialize, - ) - return self._stubs['list_indexes'] - - @property - def update_index(self) -> Callable[ - [index_service.UpdateIndexRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the update index method over gRPC. - - Updates an Index. - - Returns: - Callable[[~.UpdateIndexRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_index' not in self._stubs: - self._stubs['update_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.IndexService/UpdateIndex', - request_serializer=index_service.UpdateIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_index'] - - @property - def delete_index(self) -> Callable[ - [index_service.DeleteIndexRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete index method over gRPC. - - Deletes an Index. An Index can only be deleted when all its - [DeployedIndexes][google.cloud.aiplatform.v1.Index.deployed_indexes] - had been undeployed. - - Returns: - Callable[[~.DeleteIndexRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_index' not in self._stubs: - self._stubs['delete_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.IndexService/DeleteIndex', - request_serializer=index_service.DeleteIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_index'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'IndexServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/__init__.py deleted file mode 100644 index 817e1b49e2..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import JobServiceClient -from .async_client import JobServiceAsyncClient - -__all__ = ( - 'JobServiceClient', - 'JobServiceAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/async_client.py deleted file mode 100644 index 4dee75b46c..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/async_client.py +++ /dev/null @@ -1,2649 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.job_service import pagers -from google.cloud.aiplatform_v1.types import batch_prediction_job -from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job -from google.cloud.aiplatform_v1.types import completion_stats -from google.cloud.aiplatform_v1.types import custom_job -from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1.types import data_labeling_job -from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import explanation -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job -from google.cloud.aiplatform_v1.types import io -from google.cloud.aiplatform_v1.types import job_service -from google.cloud.aiplatform_v1.types import job_state -from google.cloud.aiplatform_v1.types import machine_resources -from google.cloud.aiplatform_v1.types import manual_batch_tuning_parameters -from 
google.cloud.aiplatform_v1.types import model_deployment_monitoring_job -from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job -from google.cloud.aiplatform_v1.types import model_monitoring -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.cloud.aiplatform_v1.types import study -from google.cloud.aiplatform_v1.types import unmanaged_container_model -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from google.type import money_pb2 # type: ignore -from .transports.base import JobServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport -from .client import JobServiceClient - - -class JobServiceAsyncClient: - """A service for creating and managing Vertex AI's jobs.""" - - _client: JobServiceClient - - DEFAULT_ENDPOINT = JobServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = JobServiceClient.DEFAULT_MTLS_ENDPOINT - - batch_prediction_job_path = staticmethod(JobServiceClient.batch_prediction_job_path) - parse_batch_prediction_job_path = staticmethod(JobServiceClient.parse_batch_prediction_job_path) - custom_job_path = staticmethod(JobServiceClient.custom_job_path) - parse_custom_job_path = staticmethod(JobServiceClient.parse_custom_job_path) - data_labeling_job_path = staticmethod(JobServiceClient.data_labeling_job_path) - parse_data_labeling_job_path = staticmethod(JobServiceClient.parse_data_labeling_job_path) - dataset_path = staticmethod(JobServiceClient.dataset_path) - parse_dataset_path = staticmethod(JobServiceClient.parse_dataset_path) - endpoint_path = staticmethod(JobServiceClient.endpoint_path) - parse_endpoint_path = 
staticmethod(JobServiceClient.parse_endpoint_path) - hyperparameter_tuning_job_path = staticmethod(JobServiceClient.hyperparameter_tuning_job_path) - parse_hyperparameter_tuning_job_path = staticmethod(JobServiceClient.parse_hyperparameter_tuning_job_path) - model_path = staticmethod(JobServiceClient.model_path) - parse_model_path = staticmethod(JobServiceClient.parse_model_path) - model_deployment_monitoring_job_path = staticmethod(JobServiceClient.model_deployment_monitoring_job_path) - parse_model_deployment_monitoring_job_path = staticmethod(JobServiceClient.parse_model_deployment_monitoring_job_path) - network_path = staticmethod(JobServiceClient.network_path) - parse_network_path = staticmethod(JobServiceClient.parse_network_path) - tensorboard_path = staticmethod(JobServiceClient.tensorboard_path) - parse_tensorboard_path = staticmethod(JobServiceClient.parse_tensorboard_path) - trial_path = staticmethod(JobServiceClient.trial_path) - parse_trial_path = staticmethod(JobServiceClient.parse_trial_path) - common_billing_account_path = staticmethod(JobServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(JobServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(JobServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(JobServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(JobServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(JobServiceClient.parse_common_organization_path) - common_project_path = staticmethod(JobServiceClient.common_project_path) - parse_common_project_path = staticmethod(JobServiceClient.parse_common_project_path) - common_location_path = staticmethod(JobServiceClient.common_location_path) - parse_common_location_path = staticmethod(JobServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of 
this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - JobServiceAsyncClient: The constructed client. - """ - return JobServiceClient.from_service_account_info.__func__(JobServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - JobServiceAsyncClient: The constructed client. - """ - return JobServiceClient.from_service_account_file.__func__(JobServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> JobServiceTransport: - """Returns the transport used by the client instance. - - Returns: - JobServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(JobServiceClient).get_transport_class, type(JobServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, JobServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the job service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. 
- transport (Union[str, ~.JobServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = JobServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_custom_job(self, - request: Union[job_service.CreateCustomJobRequest, dict] = None, - *, - parent: str = None, - custom_job: gca_custom_job.CustomJob = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_custom_job.CustomJob: - r"""Creates a CustomJob. A created CustomJob right away - will be attempted to be run. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateCustomJobRequest, dict]): - The request object. 
Request message for - [JobService.CreateCustomJob][google.cloud.aiplatform.v1.JobService.CreateCustomJob]. - parent (:class:`str`): - Required. The resource name of the Location to create - the CustomJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - custom_job (:class:`google.cloud.aiplatform_v1.types.CustomJob`): - Required. The CustomJob to create. - This corresponds to the ``custom_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.CustomJob: - Represents a job that runs custom - workloads such as a Docker container or - a Python package. A CustomJob can have - multiple worker pools and each worker - pool can have its own machine and input - spec. A CustomJob will be cleaned up - once the job enters terminal state - (failed or succeeded). - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, custom_job]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CreateCustomJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - if custom_job is not None: - request.custom_job = custom_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_custom_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_custom_job(self, - request: Union[job_service.GetCustomJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> custom_job.CustomJob: - r"""Gets a CustomJob. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetCustomJobRequest, dict]): - The request object. Request message for - [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob]. - name (:class:`str`): - Required. The name of the CustomJob resource. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.CustomJob: - Represents a job that runs custom - workloads such as a Docker container or - a Python package. 
A CustomJob can have - multiple worker pools and each worker - pool can have its own machine and input - spec. A CustomJob will be cleaned up - once the job enters terminal state - (failed or succeeded). - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.GetCustomJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_custom_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_custom_jobs(self, - request: Union[job_service.ListCustomJobsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListCustomJobsAsyncPager: - r"""Lists CustomJobs in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListCustomJobsRequest, dict]): - The request object. Request message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs]. - parent (:class:`str`): - Required. 
The resource name of the Location to list the - CustomJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.job_service.pagers.ListCustomJobsAsyncPager: - Response message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.ListCustomJobsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_custom_jobs, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListCustomJobsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_custom_job(self, - request: Union[job_service.DeleteCustomJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a CustomJob. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteCustomJobRequest, dict]): - The request object. Request message for - [JobService.DeleteCustomJob][google.cloud.aiplatform.v1.JobService.DeleteCustomJob]. - name (:class:`str`): - Required. The name of the CustomJob resource to be - deleted. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. 
For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.DeleteCustomJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_custom_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def cancel_custom_job(self, - request: Union[job_service.CancelCustomJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a CustomJob. Starts asynchronous cancellation on the - CustomJob. 
The server makes a best effort to cancel the job, but - success is not guaranteed. Clients can use - [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On successful - cancellation, the CustomJob is not deleted; instead it becomes a - job with a - [CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is - set to ``CANCELLED``. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CancelCustomJobRequest, dict]): - The request object. Request message for - [JobService.CancelCustomJob][google.cloud.aiplatform.v1.JobService.CancelCustomJob]. - name (:class:`str`): - Required. The name of the CustomJob to cancel. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CancelCustomJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.cancel_custom_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def create_data_labeling_job(self, - request: Union[job_service.CreateDataLabelingJobRequest, dict] = None, - *, - parent: str = None, - data_labeling_job: gca_data_labeling_job.DataLabelingJob = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_data_labeling_job.DataLabelingJob: - r"""Creates a DataLabelingJob. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateDataLabelingJobRequest, dict]): - The request object. Request message for - [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1.JobService.CreateDataLabelingJob]. - parent (:class:`str`): - Required. The parent of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - data_labeling_job (:class:`google.cloud.aiplatform_v1.types.DataLabelingJob`): - Required. The DataLabelingJob to - create. - - This corresponds to the ``data_labeling_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.DataLabelingJob: - DataLabelingJob is used to trigger a - human labeling job on unlabeled data - from the following Dataset: - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, data_labeling_job]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CreateDataLabelingJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if data_labeling_job is not None: - request.data_labeling_job = data_labeling_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_data_labeling_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_data_labeling_job(self, - request: Union[job_service.GetDataLabelingJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> data_labeling_job.DataLabelingJob: - r"""Gets a DataLabelingJob. 
- - Args: - request (Union[google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest, dict]): - The request object. Request message for - [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1.JobService.GetDataLabelingJob]. - name (:class:`str`): - Required. The name of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.DataLabelingJob: - DataLabelingJob is used to trigger a - human labeling job on unlabeled data - from the following Dataset: - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.GetDataLabelingJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_data_labeling_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_data_labeling_jobs(self, - request: Union[job_service.ListDataLabelingJobsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDataLabelingJobsAsyncPager: - r"""Lists DataLabelingJobs in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest, dict]): - The request object. Request message for - [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. - parent (:class:`str`): - Required. The parent of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.job_service.pagers.ListDataLabelingJobsAsyncPager: - Response message for - [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.ListDataLabelingJobsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_data_labeling_jobs, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListDataLabelingJobsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_data_labeling_job(self, - request: Union[job_service.DeleteDataLabelingJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a DataLabelingJob. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest, dict]): - The request object. Request message for - [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob]. - name (:class:`str`): - Required. The name of the DataLabelingJob to be deleted. 
- Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.DeleteDataLabelingJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_data_labeling_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def cancel_data_labeling_job(self, - request: Union[job_service.CancelDataLabelingJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a DataLabelingJob. Success of cancellation is - not guaranteed. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CancelDataLabelingJobRequest, dict]): - The request object. Request message for - [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1.JobService.CancelDataLabelingJob]. - name (:class:`str`): - Required. The name of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CancelDataLabelingJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.cancel_data_labeling_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def create_hyperparameter_tuning_job(self, - request: Union[job_service.CreateHyperparameterTuningJobRequest, dict] = None, - *, - parent: str = None, - hyperparameter_tuning_job: gca_hyperparameter_tuning_job.HyperparameterTuningJob = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_hyperparameter_tuning_job.HyperparameterTuningJob: - r"""Creates a HyperparameterTuningJob - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateHyperparameterTuningJobRequest, dict]): - The request object. Request message for - [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CreateHyperparameterTuningJob]. - parent (:class:`str`): - Required. The resource name of the Location to create - the HyperparameterTuningJob in. 
Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - hyperparameter_tuning_job (:class:`google.cloud.aiplatform_v1.types.HyperparameterTuningJob`): - Required. The HyperparameterTuningJob - to create. - - This corresponds to the ``hyperparameter_tuning_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.HyperparameterTuningJob: - Represents a HyperparameterTuningJob. - A HyperparameterTuningJob has a Study - specification and multiple CustomJobs - with identical CustomJob specification. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, hyperparameter_tuning_job]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CreateHyperparameterTuningJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if hyperparameter_tuning_job is not None: - request.hyperparameter_tuning_job = hyperparameter_tuning_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_hyperparameter_tuning_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_hyperparameter_tuning_job(self, - request: Union[job_service.GetHyperparameterTuningJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> hyperparameter_tuning_job.HyperparameterTuningJob: - r"""Gets a HyperparameterTuningJob - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest, dict]): - The request object. Request message for - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob]. - name (:class:`str`): - Required. The name of the HyperparameterTuningJob - resource. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.HyperparameterTuningJob: - Represents a HyperparameterTuningJob. - A HyperparameterTuningJob has a Study - specification and multiple CustomJobs - with identical CustomJob specification. 
- - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.GetHyperparameterTuningJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_hyperparameter_tuning_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_hyperparameter_tuning_jobs(self, - request: Union[job_service.ListHyperparameterTuningJobsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListHyperparameterTuningJobsAsyncPager: - r"""Lists HyperparameterTuningJobs in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest, dict]): - The request object. Request message for - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs]. - parent (:class:`str`): - Required. The resource name of the Location to list the - HyperparameterTuningJobs from. 
Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.job_service.pagers.ListHyperparameterTuningJobsAsyncPager: - Response message for - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.ListHyperparameterTuningJobsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_hyperparameter_tuning_jobs, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListHyperparameterTuningJobsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_hyperparameter_tuning_job(self, - request: Union[job_service.DeleteHyperparameterTuningJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a HyperparameterTuningJob. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest, dict]): - The request object. Request message for - [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob]. - name (:class:`str`): - Required. The name of the HyperparameterTuningJob - resource to be deleted. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. 
For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.DeleteHyperparameterTuningJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_hyperparameter_tuning_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def cancel_hyperparameter_tuning_job(self, - request: Union[job_service.CancelHyperparameterTuningJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a HyperparameterTuningJob. 
Starts asynchronous - cancellation on the HyperparameterTuningJob. The server makes a - best effort to cancel the job, but success is not guaranteed. - Clients can use - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On successful - cancellation, the HyperparameterTuningJob is not deleted; - instead it becomes a job with a - [HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state] - is set to ``CANCELLED``. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CancelHyperparameterTuningJobRequest, dict]): - The request object. Request message for - [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CancelHyperparameterTuningJob]. - name (:class:`str`): - Required. The name of the HyperparameterTuningJob to - cancel. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CancelHyperparameterTuningJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.cancel_hyperparameter_tuning_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def create_batch_prediction_job(self, - request: Union[job_service.CreateBatchPredictionJobRequest, dict] = None, - *, - parent: str = None, - batch_prediction_job: gca_batch_prediction_job.BatchPredictionJob = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_batch_prediction_job.BatchPredictionJob: - r"""Creates a BatchPredictionJob. A BatchPredictionJob - once created will right away be attempted to start. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateBatchPredictionJobRequest, dict]): - The request object. Request message for - [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CreateBatchPredictionJob]. - parent (:class:`str`): - Required. The resource name of the Location to create - the BatchPredictionJob in. 
Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - batch_prediction_job (:class:`google.cloud.aiplatform_v1.types.BatchPredictionJob`): - Required. The BatchPredictionJob to - create. - - This corresponds to the ``batch_prediction_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.BatchPredictionJob: - A job that uses a [Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to produce predictions - on multiple [input - instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. - If predictions for significant portion of the - instances fail, the job may finish without attempting - predictions for all remaining instances. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, batch_prediction_job]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CreateBatchPredictionJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if batch_prediction_job is not None: - request.batch_prediction_job = batch_prediction_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_batch_prediction_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_batch_prediction_job(self, - request: Union[job_service.GetBatchPredictionJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> batch_prediction_job.BatchPredictionJob: - r"""Gets a BatchPredictionJob - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest, dict]): - The request object. Request message for - [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob]. - name (:class:`str`): - Required. The name of the BatchPredictionJob resource. - Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.BatchPredictionJob: - A job that uses a [Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to produce predictions - on multiple [input - instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. 
- If predictions for significant portion of the - instances fail, the job may finish without attempting - predictions for all remaining instances. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.GetBatchPredictionJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_batch_prediction_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_batch_prediction_jobs(self, - request: Union[job_service.ListBatchPredictionJobsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListBatchPredictionJobsAsyncPager: - r"""Lists BatchPredictionJobs in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest, dict]): - The request object. Request message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs]. 
- parent (:class:`str`): - Required. The resource name of the Location to list the - BatchPredictionJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.job_service.pagers.ListBatchPredictionJobsAsyncPager: - Response message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.ListBatchPredictionJobsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_batch_prediction_jobs, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListBatchPredictionJobsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_batch_prediction_job(self, - request: Union[job_service.DeleteBatchPredictionJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a BatchPredictionJob. Can only be called on - jobs that already finished. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteBatchPredictionJobRequest, dict]): - The request object. Request message for - [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1.JobService.DeleteBatchPredictionJob]. - name (:class:`str`): - Required. The name of the BatchPredictionJob resource to - be deleted. Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. 
A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.DeleteBatchPredictionJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_batch_prediction_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
- return response - - async def cancel_batch_prediction_job(self, - request: Union[job_service.CancelBatchPredictionJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a BatchPredictionJob. - - Starts asynchronous cancellation on the BatchPredictionJob. The - server makes the best effort to cancel the job, but success is - not guaranteed. Clients can use - [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On a successful - cancellation, the BatchPredictionJob is not deleted;instead its - [BatchPredictionJob.state][google.cloud.aiplatform.v1.BatchPredictionJob.state] - is set to ``CANCELLED``. Any files already outputted by the job - are not deleted. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CancelBatchPredictionJobRequest, dict]): - The request object. Request message for - [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CancelBatchPredictionJob]. - name (:class:`str`): - Required. The name of the BatchPredictionJob to cancel. - Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CancelBatchPredictionJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.cancel_batch_prediction_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def create_model_deployment_monitoring_job(self, - request: Union[job_service.CreateModelDeploymentMonitoringJobRequest, dict] = None, - *, - parent: str = None, - model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob: - r"""Creates a ModelDeploymentMonitoringJob. It will run - periodically on a configured interval. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateModelDeploymentMonitoringJobRequest, dict]): - The request object. Request message for - [JobService.CreateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.CreateModelDeploymentMonitoringJob]. - parent (:class:`str`): - Required. The parent of the - ModelDeploymentMonitoringJob. 
Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - model_deployment_monitoring_job (:class:`google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob`): - Required. The - ModelDeploymentMonitoringJob to create - - This corresponds to the ``model_deployment_monitoring_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob: - Represents a job that runs - periodically to monitor the deployed - models in an endpoint. It will analyze - the logged training & prediction data to - detect any abnormal behaviors. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, model_deployment_monitoring_job]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CreateModelDeploymentMonitoringJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if model_deployment_monitoring_job is not None: - request.model_deployment_monitoring_job = model_deployment_monitoring_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_model_deployment_monitoring_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def search_model_deployment_monitoring_stats_anomalies(self, - request: Union[job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest, dict] = None, - *, - model_deployment_monitoring_job: str = None, - deployed_model_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager: - r"""Searches Model Monitoring Statistics generated within - a given time window. - - Args: - request (Union[google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest, dict]): - The request object. Request message for - [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. - model_deployment_monitoring_job (:class:`str`): - Required. ModelDeploymentMonitoring Job resource name. - Format: - \`projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job} - - This corresponds to the ``model_deployment_monitoring_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_model_id (:class:`str`): - Required. The DeployedModel ID of the - [ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. 
- - This corresponds to the ``deployed_model_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager: - Response message for - [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([model_deployment_monitoring_job, deployed_model_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if model_deployment_monitoring_job is not None: - request.model_deployment_monitoring_job = model_deployment_monitoring_job - if deployed_model_id is not None: - request.deployed_model_id = deployed_model_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.search_model_deployment_monitoring_stats_anomalies, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("model_deployment_monitoring_job", request.model_deployment_monitoring_job), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_model_deployment_monitoring_job(self, - request: Union[job_service.GetModelDeploymentMonitoringJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model_deployment_monitoring_job.ModelDeploymentMonitoringJob: - r"""Gets a ModelDeploymentMonitoringJob. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetModelDeploymentMonitoringJobRequest, dict]): - The request object. Request message for - [JobService.GetModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.GetModelDeploymentMonitoringJob]. - name (:class:`str`): - Required. The resource name of the - ModelDeploymentMonitoringJob. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob: - Represents a job that runs - periodically to monitor the deployed - models in an endpoint. It will analyze - the logged training & prediction data to - detect any abnormal behaviors. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.GetModelDeploymentMonitoringJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_model_deployment_monitoring_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_model_deployment_monitoring_jobs(self, - request: Union[job_service.ListModelDeploymentMonitoringJobsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelDeploymentMonitoringJobsAsyncPager: - r"""Lists ModelDeploymentMonitoringJobs in a Location. 
- - Args: - request (Union[google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsRequest, dict]): - The request object. Request message for - [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1.JobService.ListModelDeploymentMonitoringJobs]. - parent (:class:`str`): - Required. The parent of the - ModelDeploymentMonitoringJob. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.job_service.pagers.ListModelDeploymentMonitoringJobsAsyncPager: - Response message for - [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1.JobService.ListModelDeploymentMonitoringJobs]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.ListModelDeploymentMonitoringJobsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_model_deployment_monitoring_jobs, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListModelDeploymentMonitoringJobsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_model_deployment_monitoring_job(self, - request: Union[job_service.UpdateModelDeploymentMonitoringJobRequest, dict] = None, - *, - model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Updates a ModelDeploymentMonitoringJob. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateModelDeploymentMonitoringJobRequest, dict]): - The request object. Request message for - [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.UpdateModelDeploymentMonitoringJob]. - model_deployment_monitoring_job (:class:`google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob`): - Required. The model monitoring - configuration which replaces the - resource on the server. - - This corresponds to the ``model_deployment_monitoring_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The update mask is used to specify the fields - to be overwritten in the ModelDeploymentMonitoringJob - resource by the update. The fields specified in the - update_mask are relative to the resource, not the full - request. A field will be overwritten if it is in the - mask. If the user does not provide a mask then only the - non-empty fields present in the request will be - overwritten. Set the update_mask to ``*`` to override - all fields. For the objective config, the user can - either provide the update mask for - model_deployment_monitoring_objective_configs or any - combination of its nested fields, such as: - model_deployment_monitoring_objective_configs.objective_config.training_dataset. - - Updatable fields: - - - ``display_name`` - - ``model_deployment_monitoring_schedule_config`` - - ``model_monitoring_alert_config`` - - ``logging_sampling_strategy`` - - ``labels`` - - ``log_ttl`` - - ``enable_monitoring_pipeline_logs`` . and - - ``model_deployment_monitoring_objective_configs`` . - or - - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset`` - - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config`` - - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. 
- - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob` Represents a job that runs periodically to monitor the deployed models in an - endpoint. It will analyze the logged training & - prediction data to detect any abnormal behaviors. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([model_deployment_monitoring_job, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.UpdateModelDeploymentMonitoringJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if model_deployment_monitoring_job is not None: - request.model_deployment_monitoring_job = model_deployment_monitoring_job - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_model_deployment_monitoring_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("model_deployment_monitoring_job.name", request.model_deployment_monitoring_job.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
- response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, - metadata_type=job_service.UpdateModelDeploymentMonitoringJobOperationMetadata, - ) - - # Done; return the response. - return response - - async def delete_model_deployment_monitoring_job(self, - request: Union[job_service.DeleteModelDeploymentMonitoringJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a ModelDeploymentMonitoringJob. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteModelDeploymentMonitoringJobRequest, dict]): - The request object. Request message for - [JobService.DeleteModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.DeleteModelDeploymentMonitoringJob]. - name (:class:`str`): - Required. The resource name of the model monitoring job - to delete. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. 
For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.DeleteModelDeploymentMonitoringJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_model_deployment_monitoring_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def pause_model_deployment_monitoring_job(self, - request: Union[job_service.PauseModelDeploymentMonitoringJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Pauses a ModelDeploymentMonitoringJob. 
If the job is running, - the server makes a best effort to cancel the job. Will mark - [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1.ModelDeploymentMonitoringJob.state] - to 'PAUSED'. - - Args: - request (Union[google.cloud.aiplatform_v1.types.PauseModelDeploymentMonitoringJobRequest, dict]): - The request object. Request message for - [JobService.PauseModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.PauseModelDeploymentMonitoringJob]. - name (:class:`str`): - Required. The resource name of the - ModelDeploymentMonitoringJob to pause. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.PauseModelDeploymentMonitoringJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.pause_model_deployment_monitoring_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def resume_model_deployment_monitoring_job(self, - request: Union[job_service.ResumeModelDeploymentMonitoringJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Resumes a paused ModelDeploymentMonitoringJob. It - will start to run from next scheduled time. A deleted - ModelDeploymentMonitoringJob can't be resumed. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ResumeModelDeploymentMonitoringJobRequest, dict]): - The request object. Request message for - [JobService.ResumeModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.ResumeModelDeploymentMonitoringJob]. - name (:class:`str`): - Required. The resource name of the - ModelDeploymentMonitoringJob to resume. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.ResumeModelDeploymentMonitoringJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.resume_model_deployment_monitoring_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "JobServiceAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/client.py deleted file mode 100644 index 430a97451b..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/client.py +++ /dev/null @@ -1,2937 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.job_service import pagers -from google.cloud.aiplatform_v1.types import batch_prediction_job -from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job -from google.cloud.aiplatform_v1.types import completion_stats -from google.cloud.aiplatform_v1.types import custom_job -from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1.types import data_labeling_job -from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job 
-from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import explanation -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job -from google.cloud.aiplatform_v1.types import io -from google.cloud.aiplatform_v1.types import job_service -from google.cloud.aiplatform_v1.types import job_state -from google.cloud.aiplatform_v1.types import machine_resources -from google.cloud.aiplatform_v1.types import manual_batch_tuning_parameters -from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job -from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job -from google.cloud.aiplatform_v1.types import model_monitoring -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.cloud.aiplatform_v1.types import study -from google.cloud.aiplatform_v1.types import unmanaged_container_model -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from google.type import money_pb2 # type: ignore -from .transports.base import JobServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import JobServiceGrpcTransport -from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport - - -class JobServiceClientMeta(type): - """Metaclass for the JobService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. 
- """ - _transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] - _transport_registry["grpc"] = JobServiceGrpcTransport - _transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[JobServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class JobServiceClient(metaclass=JobServiceClientMeta): - """A service for creating and managing Vertex AI's jobs.""" - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
- ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - JobServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - JobServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> JobServiceTransport: - """Returns the transport used by the client instance. - - Returns: - JobServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def batch_prediction_job_path(project: str,location: str,batch_prediction_job: str,) -> str: - """Returns a fully-qualified batch_prediction_job string.""" - return "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format(project=project, location=location, batch_prediction_job=batch_prediction_job, ) - - @staticmethod - def parse_batch_prediction_job_path(path: str) -> Dict[str,str]: - """Parses a batch_prediction_job path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/batchPredictionJobs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def custom_job_path(project: str,location: str,custom_job: str,) -> str: - """Returns a fully-qualified custom_job string.""" - return "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) - - @staticmethod - def parse_custom_job_path(path: str) -> Dict[str,str]: - """Parses a custom_job path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/customJobs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def data_labeling_job_path(project: str,location: str,data_labeling_job: str,) -> str: - """Returns a fully-qualified data_labeling_job string.""" - return "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format(project=project, location=location, data_labeling_job=data_labeling_job, ) - - @staticmethod - def parse_data_labeling_job_path(path: str) -> Dict[str,str]: - """Parses a data_labeling_job path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/dataLabelingJobs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def dataset_path(project: str,location: str,dataset: str,) -> str: - """Returns a fully-qualified dataset string.""" - return 
"projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - - @staticmethod - def parse_dataset_path(path: str) -> Dict[str,str]: - """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def endpoint_path(project: str,location: str,endpoint: str,) -> str: - """Returns a fully-qualified endpoint string.""" - return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - - @staticmethod - def parse_endpoint_path(path: str) -> Dict[str,str]: - """Parses a endpoint path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def hyperparameter_tuning_job_path(project: str,location: str,hyperparameter_tuning_job: str,) -> str: - """Returns a fully-qualified hyperparameter_tuning_job string.""" - return "projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}".format(project=project, location=location, hyperparameter_tuning_job=hyperparameter_tuning_job, ) - - @staticmethod - def parse_hyperparameter_tuning_job_path(path: str) -> Dict[str,str]: - """Parses a hyperparameter_tuning_job path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/hyperparameterTuningJobs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_path(project: str,location: str,model: str,) -> str: - """Returns a fully-qualified model string.""" - return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - - @staticmethod - def parse_model_path(path: str) -> Dict[str,str]: - """Parses a model path into its component segments.""" - m = 
re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_deployment_monitoring_job_path(project: str,location: str,model_deployment_monitoring_job: str,) -> str: - """Returns a fully-qualified model_deployment_monitoring_job string.""" - return "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format(project=project, location=location, model_deployment_monitoring_job=model_deployment_monitoring_job, ) - - @staticmethod - def parse_model_deployment_monitoring_job_path(path: str) -> Dict[str,str]: - """Parses a model_deployment_monitoring_job path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/modelDeploymentMonitoringJobs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def network_path(project: str,network: str,) -> str: - """Returns a fully-qualified network string.""" - return "projects/{project}/global/networks/{network}".format(project=project, network=network, ) - - @staticmethod - def parse_network_path(path: str) -> Dict[str,str]: - """Parses a network path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/global/networks/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def tensorboard_path(project: str,location: str,tensorboard: str,) -> str: - """Returns a fully-qualified tensorboard string.""" - return "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(project=project, location=location, tensorboard=tensorboard, ) - - @staticmethod - def parse_tensorboard_path(path: str) -> Dict[str,str]: - """Parses a tensorboard path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def trial_path(project: str,location: str,study: str,trial: str,) -> str: - """Returns a fully-qualified trial 
string.""" - return "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, ) - - @staticmethod - def parse_trial_path(path: str) -> Dict[str,str]: - """Parses a trial path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/studies/(?P.+?)/trials/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its 
component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, JobServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the job service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, JobServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. 
- (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. 
- if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, JobServiceTransport): - # transport is a JobServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def create_custom_job(self, - request: Union[job_service.CreateCustomJobRequest, dict] = None, - *, - parent: str = None, - custom_job: gca_custom_job.CustomJob = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_custom_job.CustomJob: - r"""Creates a CustomJob. A created CustomJob right away - will be attempted to be run. 
- - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateCustomJobRequest, dict]): - The request object. Request message for - [JobService.CreateCustomJob][google.cloud.aiplatform.v1.JobService.CreateCustomJob]. - parent (str): - Required. The resource name of the Location to create - the CustomJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - custom_job (google.cloud.aiplatform_v1.types.CustomJob): - Required. The CustomJob to create. - This corresponds to the ``custom_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.CustomJob: - Represents a job that runs custom - workloads such as a Docker container or - a Python package. A CustomJob can have - multiple worker pools and each worker - pool can have its own machine and input - spec. A CustomJob will be cleaned up - once the job enters terminal state - (failed or succeeded). - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, custom_job]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CreateCustomJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, job_service.CreateCustomJobRequest): - request = job_service.CreateCustomJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if custom_job is not None: - request.custom_job = custom_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_custom_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_custom_job(self, - request: Union[job_service.GetCustomJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> custom_job.CustomJob: - r"""Gets a CustomJob. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetCustomJobRequest, dict]): - The request object. Request message for - [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob]. - name (str): - Required. The name of the CustomJob resource. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1.types.CustomJob: - Represents a job that runs custom - workloads such as a Docker container or - a Python package. A CustomJob can have - multiple worker pools and each worker - pool can have its own machine and input - spec. A CustomJob will be cleaned up - once the job enters terminal state - (failed or succeeded). - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.GetCustomJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.GetCustomJobRequest): - request = job_service.GetCustomJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_custom_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def list_custom_jobs(self, - request: Union[job_service.ListCustomJobsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListCustomJobsPager: - r"""Lists CustomJobs in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListCustomJobsRequest, dict]): - The request object. Request message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs]. - parent (str): - Required. The resource name of the Location to list the - CustomJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.job_service.pagers.ListCustomJobsPager: - Response message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.ListCustomJobsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, job_service.ListCustomJobsRequest): - request = job_service.ListCustomJobsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_custom_jobs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListCustomJobsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_custom_job(self, - request: Union[job_service.DeleteCustomJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a CustomJob. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteCustomJobRequest, dict]): - The request object. Request message for - [JobService.DeleteCustomJob][google.cloud.aiplatform.v1.JobService.DeleteCustomJob]. - name (str): - Required. The name of the CustomJob resource to be - deleted. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.DeleteCustomJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.DeleteCustomJobRequest): - request = job_service.DeleteCustomJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_custom_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def cancel_custom_job(self, - request: Union[job_service.CancelCustomJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a CustomJob. Starts asynchronous cancellation on the - CustomJob. The server makes a best effort to cancel the job, but - success is not guaranteed. Clients can use - [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On successful - cancellation, the CustomJob is not deleted; instead it becomes a - job with a - [CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is - set to ``CANCELLED``. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CancelCustomJobRequest, dict]): - The request object. Request message for - [JobService.CancelCustomJob][google.cloud.aiplatform.v1.JobService.CancelCustomJob]. - name (str): - Required. The name of the CustomJob to cancel. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CancelCustomJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.CancelCustomJobRequest): - request = job_service.CancelCustomJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.cancel_custom_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def create_data_labeling_job(self, - request: Union[job_service.CreateDataLabelingJobRequest, dict] = None, - *, - parent: str = None, - data_labeling_job: gca_data_labeling_job.DataLabelingJob = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_data_labeling_job.DataLabelingJob: - r"""Creates a DataLabelingJob. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateDataLabelingJobRequest, dict]): - The request object. 
Request message for - [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1.JobService.CreateDataLabelingJob]. - parent (str): - Required. The parent of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - data_labeling_job (google.cloud.aiplatform_v1.types.DataLabelingJob): - Required. The DataLabelingJob to - create. - - This corresponds to the ``data_labeling_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.DataLabelingJob: - DataLabelingJob is used to trigger a - human labeling job on unlabeled data - from the following Dataset: - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, data_labeling_job]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CreateDataLabelingJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.CreateDataLabelingJobRequest): - request = job_service.CreateDataLabelingJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - if data_labeling_job is not None: - request.data_labeling_job = data_labeling_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_data_labeling_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_data_labeling_job(self, - request: Union[job_service.GetDataLabelingJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> data_labeling_job.DataLabelingJob: - r"""Gets a DataLabelingJob. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest, dict]): - The request object. Request message for - [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1.JobService.GetDataLabelingJob]. - name (str): - Required. The name of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.DataLabelingJob: - DataLabelingJob is used to trigger a - human labeling job on unlabeled data - from the following Dataset: - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.GetDataLabelingJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.GetDataLabelingJobRequest): - request = job_service.GetDataLabelingJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_data_labeling_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_data_labeling_jobs(self, - request: Union[job_service.ListDataLabelingJobsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDataLabelingJobsPager: - r"""Lists DataLabelingJobs in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest, dict]): - The request object. Request message for - [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. - parent (str): - Required. 
The parent of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.job_service.pagers.ListDataLabelingJobsPager: - Response message for - [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.ListDataLabelingJobsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.ListDataLabelingJobsRequest): - request = job_service.ListDataLabelingJobsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_data_labeling_jobs] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListDataLabelingJobsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_data_labeling_job(self, - request: Union[job_service.DeleteDataLabelingJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a DataLabelingJob. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest, dict]): - The request object. Request message for - [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob]. - name (str): - Required. The name of the DataLabelingJob to be deleted. - Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. 
A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.DeleteDataLabelingJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.DeleteDataLabelingJobRequest): - request = job_service.DeleteDataLabelingJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_data_labeling_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
- return response - - def cancel_data_labeling_job(self, - request: Union[job_service.CancelDataLabelingJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a DataLabelingJob. Success of cancellation is - not guaranteed. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CancelDataLabelingJobRequest, dict]): - The request object. Request message for - [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1.JobService.CancelDataLabelingJob]. - name (str): - Required. The name of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CancelDataLabelingJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.CancelDataLabelingJobRequest): - request = job_service.CancelDataLabelingJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.cancel_data_labeling_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def create_hyperparameter_tuning_job(self, - request: Union[job_service.CreateHyperparameterTuningJobRequest, dict] = None, - *, - parent: str = None, - hyperparameter_tuning_job: gca_hyperparameter_tuning_job.HyperparameterTuningJob = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_hyperparameter_tuning_job.HyperparameterTuningJob: - r"""Creates a HyperparameterTuningJob - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateHyperparameterTuningJobRequest, dict]): - The request object. Request message for - [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CreateHyperparameterTuningJob]. - parent (str): - Required. The resource name of the Location to create - the HyperparameterTuningJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - hyperparameter_tuning_job (google.cloud.aiplatform_v1.types.HyperparameterTuningJob): - Required. The HyperparameterTuningJob - to create. - - This corresponds to the ``hyperparameter_tuning_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.HyperparameterTuningJob: - Represents a HyperparameterTuningJob. - A HyperparameterTuningJob has a Study - specification and multiple CustomJobs - with identical CustomJob specification. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, hyperparameter_tuning_job]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CreateHyperparameterTuningJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.CreateHyperparameterTuningJobRequest): - request = job_service.CreateHyperparameterTuningJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if hyperparameter_tuning_job is not None: - request.hyperparameter_tuning_job = hyperparameter_tuning_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_hyperparameter_tuning_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def get_hyperparameter_tuning_job(self, - request: Union[job_service.GetHyperparameterTuningJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> hyperparameter_tuning_job.HyperparameterTuningJob: - r"""Gets a HyperparameterTuningJob - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest, dict]): - The request object. Request message for - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob]. - name (str): - Required. The name of the HyperparameterTuningJob - resource. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.HyperparameterTuningJob: - Represents a HyperparameterTuningJob. - A HyperparameterTuningJob has a Study - specification and multiple CustomJobs - with identical CustomJob specification. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.GetHyperparameterTuningJobRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.GetHyperparameterTuningJobRequest): - request = job_service.GetHyperparameterTuningJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_hyperparameter_tuning_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_hyperparameter_tuning_jobs(self, - request: Union[job_service.ListHyperparameterTuningJobsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListHyperparameterTuningJobsPager: - r"""Lists HyperparameterTuningJobs in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest, dict]): - The request object. Request message for - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs]. - parent (str): - Required. The resource name of the Location to list the - HyperparameterTuningJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.job_service.pagers.ListHyperparameterTuningJobsPager: - Response message for - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.ListHyperparameterTuningJobsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.ListHyperparameterTuningJobsRequest): - request = job_service.ListHyperparameterTuningJobsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_hyperparameter_tuning_jobs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListHyperparameterTuningJobsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_hyperparameter_tuning_job(self, - request: Union[job_service.DeleteHyperparameterTuningJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a HyperparameterTuningJob. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest, dict]): - The request object. Request message for - [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob]. - name (str): - Required. The name of the HyperparameterTuningJob - resource to be deleted. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. 
For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.DeleteHyperparameterTuningJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.DeleteHyperparameterTuningJobRequest): - request = job_service.DeleteHyperparameterTuningJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_hyperparameter_tuning_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
- return response - - def cancel_hyperparameter_tuning_job(self, - request: Union[job_service.CancelHyperparameterTuningJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a HyperparameterTuningJob. Starts asynchronous - cancellation on the HyperparameterTuningJob. The server makes a - best effort to cancel the job, but success is not guaranteed. - Clients can use - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On successful - cancellation, the HyperparameterTuningJob is not deleted; - instead it becomes a job with a - [HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state] - is set to ``CANCELLED``. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CancelHyperparameterTuningJobRequest, dict]): - The request object. Request message for - [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CancelHyperparameterTuningJob]. - name (str): - Required. The name of the HyperparameterTuningJob to - cancel. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CancelHyperparameterTuningJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.CancelHyperparameterTuningJobRequest): - request = job_service.CancelHyperparameterTuningJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.cancel_hyperparameter_tuning_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def create_batch_prediction_job(self, - request: Union[job_service.CreateBatchPredictionJobRequest, dict] = None, - *, - parent: str = None, - batch_prediction_job: gca_batch_prediction_job.BatchPredictionJob = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_batch_prediction_job.BatchPredictionJob: - r"""Creates a BatchPredictionJob. A BatchPredictionJob - once created will right away be attempted to start. 
- - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateBatchPredictionJobRequest, dict]): - The request object. Request message for - [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CreateBatchPredictionJob]. - parent (str): - Required. The resource name of the Location to create - the BatchPredictionJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - batch_prediction_job (google.cloud.aiplatform_v1.types.BatchPredictionJob): - Required. The BatchPredictionJob to - create. - - This corresponds to the ``batch_prediction_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.BatchPredictionJob: - A job that uses a [Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to produce predictions - on multiple [input - instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. - If predictions for significant portion of the - instances fail, the job may finish without attempting - predictions for all remaining instances. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, batch_prediction_job]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CreateBatchPredictionJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.CreateBatchPredictionJobRequest): - request = job_service.CreateBatchPredictionJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if batch_prediction_job is not None: - request.batch_prediction_job = batch_prediction_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_batch_prediction_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_batch_prediction_job(self, - request: Union[job_service.GetBatchPredictionJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> batch_prediction_job.BatchPredictionJob: - r"""Gets a BatchPredictionJob - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest, dict]): - The request object. Request message for - [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob]. - name (str): - Required. 
The name of the BatchPredictionJob resource. - Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.BatchPredictionJob: - A job that uses a [Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to produce predictions - on multiple [input - instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. - If predictions for significant portion of the - instances fail, the job may finish without attempting - predictions for all remaining instances. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.GetBatchPredictionJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.GetBatchPredictionJobRequest): - request = job_service.GetBatchPredictionJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.get_batch_prediction_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_batch_prediction_jobs(self, - request: Union[job_service.ListBatchPredictionJobsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListBatchPredictionJobsPager: - r"""Lists BatchPredictionJobs in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest, dict]): - The request object. Request message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs]. - parent (str): - Required. The resource name of the Location to list the - BatchPredictionJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.job_service.pagers.ListBatchPredictionJobsPager: - Response message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.ListBatchPredictionJobsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.ListBatchPredictionJobsRequest): - request = job_service.ListBatchPredictionJobsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_batch_prediction_jobs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListBatchPredictionJobsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_batch_prediction_job(self, - request: Union[job_service.DeleteBatchPredictionJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a BatchPredictionJob. 
Can only be called on - jobs that already finished. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteBatchPredictionJobRequest, dict]): - The request object. Request message for - [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1.JobService.DeleteBatchPredictionJob]. - name (str): - Required. The name of the BatchPredictionJob resource to - be deleted. Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.DeleteBatchPredictionJobRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.DeleteBatchPredictionJobRequest): - request = job_service.DeleteBatchPredictionJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_batch_prediction_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def cancel_batch_prediction_job(self, - request: Union[job_service.CancelBatchPredictionJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a BatchPredictionJob. - - Starts asynchronous cancellation on the BatchPredictionJob. The - server makes the best effort to cancel the job, but success is - not guaranteed. Clients can use - [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. 
On a successful - cancellation, the BatchPredictionJob is not deleted;instead its - [BatchPredictionJob.state][google.cloud.aiplatform.v1.BatchPredictionJob.state] - is set to ``CANCELLED``. Any files already outputted by the job - are not deleted. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CancelBatchPredictionJobRequest, dict]): - The request object. Request message for - [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CancelBatchPredictionJob]. - name (str): - Required. The name of the BatchPredictionJob to cancel. - Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CancelBatchPredictionJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.CancelBatchPredictionJobRequest): - request = job_service.CancelBatchPredictionJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.cancel_batch_prediction_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def create_model_deployment_monitoring_job(self, - request: Union[job_service.CreateModelDeploymentMonitoringJobRequest, dict] = None, - *, - parent: str = None, - model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob: - r"""Creates a ModelDeploymentMonitoringJob. It will run - periodically on a configured interval. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateModelDeploymentMonitoringJobRequest, dict]): - The request object. Request message for - [JobService.CreateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.CreateModelDeploymentMonitoringJob]. - parent (str): - Required. The parent of the - ModelDeploymentMonitoringJob. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - model_deployment_monitoring_job (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob): - Required. The - ModelDeploymentMonitoringJob to create - - This corresponds to the ``model_deployment_monitoring_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob: - Represents a job that runs - periodically to monitor the deployed - models in an endpoint. It will analyze - the logged training & prediction data to - detect any abnormal behaviors. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, model_deployment_monitoring_job]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CreateModelDeploymentMonitoringJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.CreateModelDeploymentMonitoringJobRequest): - request = job_service.CreateModelDeploymentMonitoringJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if model_deployment_monitoring_job is not None: - request.model_deployment_monitoring_job = model_deployment_monitoring_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_model_deployment_monitoring_job] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def search_model_deployment_monitoring_stats_anomalies(self, - request: Union[job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest, dict] = None, - *, - model_deployment_monitoring_job: str = None, - deployed_model_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager: - r"""Searches Model Monitoring Statistics generated within - a given time window. - - Args: - request (Union[google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest, dict]): - The request object. Request message for - [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. - model_deployment_monitoring_job (str): - Required. ModelDeploymentMonitoring Job resource name. - Format: - \`projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job} - - This corresponds to the ``model_deployment_monitoring_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_model_id (str): - Required. The DeployedModel ID of the - [ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. - - This corresponds to the ``deployed_model_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager: - Response message for - [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([model_deployment_monitoring_job, deployed_model_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest): - request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if model_deployment_monitoring_job is not None: - request.model_deployment_monitoring_job = model_deployment_monitoring_job - if deployed_model_id is not None: - request.deployed_model_id = deployed_model_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.search_model_deployment_monitoring_stats_anomalies] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("model_deployment_monitoring_job", request.model_deployment_monitoring_job), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_model_deployment_monitoring_job(self, - request: Union[job_service.GetModelDeploymentMonitoringJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model_deployment_monitoring_job.ModelDeploymentMonitoringJob: - r"""Gets a ModelDeploymentMonitoringJob. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetModelDeploymentMonitoringJobRequest, dict]): - The request object. Request message for - [JobService.GetModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.GetModelDeploymentMonitoringJob]. - name (str): - Required. The resource name of the - ModelDeploymentMonitoringJob. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob: - Represents a job that runs - periodically to monitor the deployed - models in an endpoint. It will analyze - the logged training & prediction data to - detect any abnormal behaviors. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.GetModelDeploymentMonitoringJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.GetModelDeploymentMonitoringJobRequest): - request = job_service.GetModelDeploymentMonitoringJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_model_deployment_monitoring_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def list_model_deployment_monitoring_jobs(self, - request: Union[job_service.ListModelDeploymentMonitoringJobsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelDeploymentMonitoringJobsPager: - r"""Lists ModelDeploymentMonitoringJobs in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsRequest, dict]): - The request object. Request message for - [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1.JobService.ListModelDeploymentMonitoringJobs]. - parent (str): - Required. The parent of the - ModelDeploymentMonitoringJob. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.job_service.pagers.ListModelDeploymentMonitoringJobsPager: - Response message for - [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1.JobService.ListModelDeploymentMonitoringJobs]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.ListModelDeploymentMonitoringJobsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.ListModelDeploymentMonitoringJobsRequest): - request = job_service.ListModelDeploymentMonitoringJobsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_model_deployment_monitoring_jobs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListModelDeploymentMonitoringJobsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def update_model_deployment_monitoring_job(self, - request: Union[job_service.UpdateModelDeploymentMonitoringJobRequest, dict] = None, - *, - model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Updates a ModelDeploymentMonitoringJob. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateModelDeploymentMonitoringJobRequest, dict]): - The request object. Request message for - [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.UpdateModelDeploymentMonitoringJob]. - model_deployment_monitoring_job (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob): - Required. The model monitoring - configuration which replaces the - resource on the server. - - This corresponds to the ``model_deployment_monitoring_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask is used to specify the fields - to be overwritten in the ModelDeploymentMonitoringJob - resource by the update. The fields specified in the - update_mask are relative to the resource, not the full - request. A field will be overwritten if it is in the - mask. If the user does not provide a mask then only the - non-empty fields present in the request will be - overwritten. Set the update_mask to ``*`` to override - all fields. For the objective config, the user can - either provide the update mask for - model_deployment_monitoring_objective_configs or any - combination of its nested fields, such as: - model_deployment_monitoring_objective_configs.objective_config.training_dataset. 
- - Updatable fields: - - - ``display_name`` - - ``model_deployment_monitoring_schedule_config`` - - ``model_monitoring_alert_config`` - - ``logging_sampling_strategy`` - - ``labels`` - - ``log_ttl`` - - ``enable_monitoring_pipeline_logs`` . and - - ``model_deployment_monitoring_objective_configs`` . - or - - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset`` - - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config`` - - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob` Represents a job that runs periodically to monitor the deployed models in an - endpoint. It will analyze the logged training & - prediction data to detect any abnormal behaviors. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([model_deployment_monitoring_job, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.UpdateModelDeploymentMonitoringJobRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.UpdateModelDeploymentMonitoringJobRequest): - request = job_service.UpdateModelDeploymentMonitoringJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if model_deployment_monitoring_job is not None: - request.model_deployment_monitoring_job = model_deployment_monitoring_job - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_model_deployment_monitoring_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("model_deployment_monitoring_job.name", request.model_deployment_monitoring_job.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, - metadata_type=job_service.UpdateModelDeploymentMonitoringJobOperationMetadata, - ) - - # Done; return the response. - return response - - def delete_model_deployment_monitoring_job(self, - request: Union[job_service.DeleteModelDeploymentMonitoringJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a ModelDeploymentMonitoringJob. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteModelDeploymentMonitoringJobRequest, dict]): - The request object. 
Request message for - [JobService.DeleteModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.DeleteModelDeploymentMonitoringJob]. - name (str): - Required. The resource name of the model monitoring job - to delete. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.DeleteModelDeploymentMonitoringJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, job_service.DeleteModelDeploymentMonitoringJobRequest): - request = job_service.DeleteModelDeploymentMonitoringJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_model_deployment_monitoring_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def pause_model_deployment_monitoring_job(self, - request: Union[job_service.PauseModelDeploymentMonitoringJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Pauses a ModelDeploymentMonitoringJob. If the job is running, - the server makes a best effort to cancel the job. Will mark - [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1.ModelDeploymentMonitoringJob.state] - to 'PAUSED'. - - Args: - request (Union[google.cloud.aiplatform_v1.types.PauseModelDeploymentMonitoringJobRequest, dict]): - The request object. Request message for - [JobService.PauseModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.PauseModelDeploymentMonitoringJob]. - name (str): - Required. The resource name of the - ModelDeploymentMonitoringJob to pause. 
Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.PauseModelDeploymentMonitoringJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.PauseModelDeploymentMonitoringJobRequest): - request = job_service.PauseModelDeploymentMonitoringJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.pause_model_deployment_monitoring_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def resume_model_deployment_monitoring_job(self, - request: Union[job_service.ResumeModelDeploymentMonitoringJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Resumes a paused ModelDeploymentMonitoringJob. It - will start to run from next scheduled time. A deleted - ModelDeploymentMonitoringJob can't be resumed. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ResumeModelDeploymentMonitoringJobRequest, dict]): - The request object. Request message for - [JobService.ResumeModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.ResumeModelDeploymentMonitoringJob]. - name (str): - Required. The resource name of the - ModelDeploymentMonitoringJob to resume. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.ResumeModelDeploymentMonitoringJobRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.ResumeModelDeploymentMonitoringJobRequest): - request = job_service.ResumeModelDeploymentMonitoringJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.resume_model_deployment_monitoring_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! 
- """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "JobServiceClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/pagers.py deleted file mode 100644 index a635b563e0..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/pagers.py +++ /dev/null @@ -1,756 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.aiplatform_v1.types import batch_prediction_job -from google.cloud.aiplatform_v1.types import custom_job -from google.cloud.aiplatform_v1.types import data_labeling_job -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job -from google.cloud.aiplatform_v1.types import job_service -from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job -from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job - - -class ListCustomJobsPager: - """A pager for iterating through ``list_custom_jobs`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListCustomJobsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``custom_jobs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListCustomJobs`` requests and continue to iterate - through the ``custom_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListCustomJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., job_service.ListCustomJobsResponse], - request: job_service.ListCustomJobsRequest, - response: job_service.ListCustomJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListCustomJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListCustomJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = job_service.ListCustomJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[job_service.ListCustomJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[custom_job.CustomJob]: - for page in self.pages: - yield from page.custom_jobs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListCustomJobsAsyncPager: - """A pager for iterating through ``list_custom_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListCustomJobsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``custom_jobs`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListCustomJobs`` requests and continue to iterate - through the ``custom_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListCustomJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[job_service.ListCustomJobsResponse]], - request: job_service.ListCustomJobsRequest, - response: job_service.ListCustomJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListCustomJobsRequest): - The initial request object. 
- response (google.cloud.aiplatform_v1.types.ListCustomJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = job_service.ListCustomJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[job_service.ListCustomJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[custom_job.CustomJob]: - async def async_generator(): - async for page in self.pages: - for response in page.custom_jobs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListDataLabelingJobsPager: - """A pager for iterating through ``list_data_labeling_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``data_labeling_jobs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListDataLabelingJobs`` requests and continue to iterate - through the ``data_labeling_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., job_service.ListDataLabelingJobsResponse], - request: job_service.ListDataLabelingJobsRequest, - response: job_service.ListDataLabelingJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = job_service.ListDataLabelingJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[job_service.ListDataLabelingJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[data_labeling_job.DataLabelingJob]: - for page in self.pages: - yield from page.data_labeling_jobs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListDataLabelingJobsAsyncPager: - """A pager for iterating through ``list_data_labeling_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``data_labeling_jobs`` field. 
- - If there are more pages, the ``__aiter__`` method will make additional - ``ListDataLabelingJobs`` requests and continue to iterate - through the ``data_labeling_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[job_service.ListDataLabelingJobsResponse]], - request: job_service.ListDataLabelingJobsRequest, - response: job_service.ListDataLabelingJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListDataLabelingJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = job_service.ListDataLabelingJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[job_service.ListDataLabelingJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[data_labeling_job.DataLabelingJob]: - async def async_generator(): - async for page in self.pages: - for response in page.data_labeling_jobs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListHyperparameterTuningJobsPager: - """A pager for iterating through ``list_hyperparameter_tuning_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``hyperparameter_tuning_jobs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListHyperparameterTuningJobs`` requests and continue to iterate - through the ``hyperparameter_tuning_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., job_service.ListHyperparameterTuningJobsResponse], - request: job_service.ListHyperparameterTuningJobsRequest, - response: job_service.ListHyperparameterTuningJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = job_service.ListHyperparameterTuningJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[job_service.ListHyperparameterTuningJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[hyperparameter_tuning_job.HyperparameterTuningJob]: - for page in self.pages: - yield from page.hyperparameter_tuning_jobs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListHyperparameterTuningJobsAsyncPager: - """A pager for iterating through ``list_hyperparameter_tuning_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``hyperparameter_tuning_jobs`` field. 
- - If there are more pages, the ``__aiter__`` method will make additional - ``ListHyperparameterTuningJobs`` requests and continue to iterate - through the ``hyperparameter_tuning_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[job_service.ListHyperparameterTuningJobsResponse]], - request: job_service.ListHyperparameterTuningJobsRequest, - response: job_service.ListHyperparameterTuningJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = job_service.ListHyperparameterTuningJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[job_service.ListHyperparameterTuningJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[hyperparameter_tuning_job.HyperparameterTuningJob]: - async def async_generator(): - async for page in self.pages: - for response in page.hyperparameter_tuning_jobs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListBatchPredictionJobsPager: - """A pager for iterating through ``list_batch_prediction_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``batch_prediction_jobs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListBatchPredictionJobs`` requests and continue to iterate - through the ``batch_prediction_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., job_service.ListBatchPredictionJobsResponse], - request: job_service.ListBatchPredictionJobsRequest, - response: job_service.ListBatchPredictionJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = job_service.ListBatchPredictionJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[job_service.ListBatchPredictionJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[batch_prediction_job.BatchPredictionJob]: - for page in self.pages: - yield from page.batch_prediction_jobs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListBatchPredictionJobsAsyncPager: - """A pager for iterating through ``list_batch_prediction_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``batch_prediction_jobs`` field. 
- - If there are more pages, the ``__aiter__`` method will make additional - ``ListBatchPredictionJobs`` requests and continue to iterate - through the ``batch_prediction_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[job_service.ListBatchPredictionJobsResponse]], - request: job_service.ListBatchPredictionJobsRequest, - response: job_service.ListBatchPredictionJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = job_service.ListBatchPredictionJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[job_service.ListBatchPredictionJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[batch_prediction_job.BatchPredictionJob]: - async def async_generator(): - async for page in self.pages: - for response in page.batch_prediction_jobs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class SearchModelDeploymentMonitoringStatsAnomaliesPager: - """A pager for iterating through ``search_model_deployment_monitoring_stats_anomalies`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``monitoring_stats`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``SearchModelDeploymentMonitoringStatsAnomalies`` requests and continue to iterate - through the ``monitoring_stats`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse], - request: job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest, - response: job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies]: - for page in self.pages: - yield from page.monitoring_stats - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager: - """A pager for iterating through ``search_model_deployment_monitoring_stats_anomalies`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``monitoring_stats`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``SearchModelDeploymentMonitoringStatsAnomalies`` requests and continue to iterate - through the ``monitoring_stats`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]], - request: job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest, - response: job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies]: - async def async_generator(): - async for page in self.pages: - for response in page.monitoring_stats: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelDeploymentMonitoringJobsPager: - """A pager for iterating through ``list_model_deployment_monitoring_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``model_deployment_monitoring_jobs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListModelDeploymentMonitoringJobs`` requests and continue to iterate - through the ``model_deployment_monitoring_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., job_service.ListModelDeploymentMonitoringJobsResponse], - request: job_service.ListModelDeploymentMonitoringJobsRequest, - response: job_service.ListModelDeploymentMonitoringJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = job_service.ListModelDeploymentMonitoringJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[job_service.ListModelDeploymentMonitoringJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: - for page in self.pages: - yield from page.model_deployment_monitoring_jobs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelDeploymentMonitoringJobsAsyncPager: - """A pager for iterating through ``list_model_deployment_monitoring_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``model_deployment_monitoring_jobs`` field. 
- - If there are more pages, the ``__aiter__`` method will make additional - ``ListModelDeploymentMonitoringJobs`` requests and continue to iterate - through the ``model_deployment_monitoring_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[job_service.ListModelDeploymentMonitoringJobsResponse]], - request: job_service.ListModelDeploymentMonitoringJobsRequest, - response: job_service.ListModelDeploymentMonitoringJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = job_service.ListModelDeploymentMonitoringJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[job_service.ListModelDeploymentMonitoringJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: - async def async_generator(): - async for page in self.pages: - for response in page.model_deployment_monitoring_jobs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/__init__.py deleted file mode 100644 index 13c5f7ade5..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -from typing import Dict, Type - -from .base import JobServiceTransport -from .grpc import JobServiceGrpcTransport -from .grpc_asyncio import JobServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] -_transport_registry['grpc'] = JobServiceGrpcTransport -_transport_registry['grpc_asyncio'] = JobServiceGrpcAsyncIOTransport - -__all__ = ( - 'JobServiceTransport', - 'JobServiceGrpcTransport', - 'JobServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/base.py deleted file mode 100644 index edf63595f9..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/base.py +++ /dev/null @@ -1,542 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1.types import batch_prediction_job -from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job -from google.cloud.aiplatform_v1.types import custom_job -from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1.types import data_labeling_job -from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job -from google.cloud.aiplatform_v1.types import job_service -from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job -from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class JobServiceTransport(abc.ABC): - """Abstract transport class for JobService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = 
DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. 
- if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.create_custom_job: gapic_v1.method.wrap_method( - self.create_custom_job, - default_timeout=None, - client_info=client_info, - ), - self.get_custom_job: gapic_v1.method.wrap_method( - self.get_custom_job, - default_timeout=None, - client_info=client_info, - ), - self.list_custom_jobs: gapic_v1.method.wrap_method( - self.list_custom_jobs, - default_timeout=None, - client_info=client_info, - ), - self.delete_custom_job: gapic_v1.method.wrap_method( - self.delete_custom_job, - default_timeout=None, - client_info=client_info, - ), - self.cancel_custom_job: gapic_v1.method.wrap_method( - self.cancel_custom_job, - default_timeout=None, - client_info=client_info, - ), - self.create_data_labeling_job: gapic_v1.method.wrap_method( - self.create_data_labeling_job, - default_timeout=None, - client_info=client_info, - ), - self.get_data_labeling_job: gapic_v1.method.wrap_method( - self.get_data_labeling_job, - default_timeout=None, - client_info=client_info, - ), - self.list_data_labeling_jobs: gapic_v1.method.wrap_method( - 
self.list_data_labeling_jobs, - default_timeout=None, - client_info=client_info, - ), - self.delete_data_labeling_job: gapic_v1.method.wrap_method( - self.delete_data_labeling_job, - default_timeout=None, - client_info=client_info, - ), - self.cancel_data_labeling_job: gapic_v1.method.wrap_method( - self.cancel_data_labeling_job, - default_timeout=None, - client_info=client_info, - ), - self.create_hyperparameter_tuning_job: gapic_v1.method.wrap_method( - self.create_hyperparameter_tuning_job, - default_timeout=None, - client_info=client_info, - ), - self.get_hyperparameter_tuning_job: gapic_v1.method.wrap_method( - self.get_hyperparameter_tuning_job, - default_timeout=None, - client_info=client_info, - ), - self.list_hyperparameter_tuning_jobs: gapic_v1.method.wrap_method( - self.list_hyperparameter_tuning_jobs, - default_timeout=None, - client_info=client_info, - ), - self.delete_hyperparameter_tuning_job: gapic_v1.method.wrap_method( - self.delete_hyperparameter_tuning_job, - default_timeout=None, - client_info=client_info, - ), - self.cancel_hyperparameter_tuning_job: gapic_v1.method.wrap_method( - self.cancel_hyperparameter_tuning_job, - default_timeout=None, - client_info=client_info, - ), - self.create_batch_prediction_job: gapic_v1.method.wrap_method( - self.create_batch_prediction_job, - default_timeout=None, - client_info=client_info, - ), - self.get_batch_prediction_job: gapic_v1.method.wrap_method( - self.get_batch_prediction_job, - default_timeout=None, - client_info=client_info, - ), - self.list_batch_prediction_jobs: gapic_v1.method.wrap_method( - self.list_batch_prediction_jobs, - default_timeout=None, - client_info=client_info, - ), - self.delete_batch_prediction_job: gapic_v1.method.wrap_method( - self.delete_batch_prediction_job, - default_timeout=None, - client_info=client_info, - ), - self.cancel_batch_prediction_job: gapic_v1.method.wrap_method( - self.cancel_batch_prediction_job, - default_timeout=None, - client_info=client_info, - ), - 
self.create_model_deployment_monitoring_job: gapic_v1.method.wrap_method( - self.create_model_deployment_monitoring_job, - default_timeout=None, - client_info=client_info, - ), - self.search_model_deployment_monitoring_stats_anomalies: gapic_v1.method.wrap_method( - self.search_model_deployment_monitoring_stats_anomalies, - default_timeout=None, - client_info=client_info, - ), - self.get_model_deployment_monitoring_job: gapic_v1.method.wrap_method( - self.get_model_deployment_monitoring_job, - default_timeout=None, - client_info=client_info, - ), - self.list_model_deployment_monitoring_jobs: gapic_v1.method.wrap_method( - self.list_model_deployment_monitoring_jobs, - default_timeout=None, - client_info=client_info, - ), - self.update_model_deployment_monitoring_job: gapic_v1.method.wrap_method( - self.update_model_deployment_monitoring_job, - default_timeout=None, - client_info=client_info, - ), - self.delete_model_deployment_monitoring_job: gapic_v1.method.wrap_method( - self.delete_model_deployment_monitoring_job, - default_timeout=None, - client_info=client_info, - ), - self.pause_model_deployment_monitoring_job: gapic_v1.method.wrap_method( - self.pause_model_deployment_monitoring_job, - default_timeout=None, - client_info=client_info, - ), - self.resume_model_deployment_monitoring_job: gapic_v1.method.wrap_method( - self.resume_model_deployment_monitoring_job, - default_timeout=None, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! 
- """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_custom_job(self) -> Callable[ - [job_service.CreateCustomJobRequest], - Union[ - gca_custom_job.CustomJob, - Awaitable[gca_custom_job.CustomJob] - ]]: - raise NotImplementedError() - - @property - def get_custom_job(self) -> Callable[ - [job_service.GetCustomJobRequest], - Union[ - custom_job.CustomJob, - Awaitable[custom_job.CustomJob] - ]]: - raise NotImplementedError() - - @property - def list_custom_jobs(self) -> Callable[ - [job_service.ListCustomJobsRequest], - Union[ - job_service.ListCustomJobsResponse, - Awaitable[job_service.ListCustomJobsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_custom_job(self) -> Callable[ - [job_service.DeleteCustomJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def cancel_custom_job(self) -> Callable[ - [job_service.CancelCustomJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def create_data_labeling_job(self) -> Callable[ - [job_service.CreateDataLabelingJobRequest], - Union[ - gca_data_labeling_job.DataLabelingJob, - Awaitable[gca_data_labeling_job.DataLabelingJob] - ]]: - raise NotImplementedError() - - @property - def get_data_labeling_job(self) -> Callable[ - [job_service.GetDataLabelingJobRequest], - Union[ - data_labeling_job.DataLabelingJob, - Awaitable[data_labeling_job.DataLabelingJob] - ]]: - raise NotImplementedError() - - @property - def list_data_labeling_jobs(self) -> Callable[ - [job_service.ListDataLabelingJobsRequest], - Union[ - job_service.ListDataLabelingJobsResponse, - Awaitable[job_service.ListDataLabelingJobsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_data_labeling_job(self) -> 
Callable[ - [job_service.DeleteDataLabelingJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def cancel_data_labeling_job(self) -> Callable[ - [job_service.CancelDataLabelingJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def create_hyperparameter_tuning_job(self) -> Callable[ - [job_service.CreateHyperparameterTuningJobRequest], - Union[ - gca_hyperparameter_tuning_job.HyperparameterTuningJob, - Awaitable[gca_hyperparameter_tuning_job.HyperparameterTuningJob] - ]]: - raise NotImplementedError() - - @property - def get_hyperparameter_tuning_job(self) -> Callable[ - [job_service.GetHyperparameterTuningJobRequest], - Union[ - hyperparameter_tuning_job.HyperparameterTuningJob, - Awaitable[hyperparameter_tuning_job.HyperparameterTuningJob] - ]]: - raise NotImplementedError() - - @property - def list_hyperparameter_tuning_jobs(self) -> Callable[ - [job_service.ListHyperparameterTuningJobsRequest], - Union[ - job_service.ListHyperparameterTuningJobsResponse, - Awaitable[job_service.ListHyperparameterTuningJobsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_hyperparameter_tuning_job(self) -> Callable[ - [job_service.DeleteHyperparameterTuningJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def cancel_hyperparameter_tuning_job(self) -> Callable[ - [job_service.CancelHyperparameterTuningJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def create_batch_prediction_job(self) -> Callable[ - [job_service.CreateBatchPredictionJobRequest], - Union[ - gca_batch_prediction_job.BatchPredictionJob, - Awaitable[gca_batch_prediction_job.BatchPredictionJob] - ]]: - raise NotImplementedError() - - @property - def 
get_batch_prediction_job(self) -> Callable[ - [job_service.GetBatchPredictionJobRequest], - Union[ - batch_prediction_job.BatchPredictionJob, - Awaitable[batch_prediction_job.BatchPredictionJob] - ]]: - raise NotImplementedError() - - @property - def list_batch_prediction_jobs(self) -> Callable[ - [job_service.ListBatchPredictionJobsRequest], - Union[ - job_service.ListBatchPredictionJobsResponse, - Awaitable[job_service.ListBatchPredictionJobsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_batch_prediction_job(self) -> Callable[ - [job_service.DeleteBatchPredictionJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def cancel_batch_prediction_job(self) -> Callable[ - [job_service.CancelBatchPredictionJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def create_model_deployment_monitoring_job(self) -> Callable[ - [job_service.CreateModelDeploymentMonitoringJobRequest], - Union[ - gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, - Awaitable[gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob] - ]]: - raise NotImplementedError() - - @property - def search_model_deployment_monitoring_stats_anomalies(self) -> Callable[ - [job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest], - Union[ - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse, - Awaitable[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse] - ]]: - raise NotImplementedError() - - @property - def get_model_deployment_monitoring_job(self) -> Callable[ - [job_service.GetModelDeploymentMonitoringJobRequest], - Union[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob, - Awaitable[model_deployment_monitoring_job.ModelDeploymentMonitoringJob] - ]]: - raise NotImplementedError() - - @property - def list_model_deployment_monitoring_jobs(self) -> 
Callable[ - [job_service.ListModelDeploymentMonitoringJobsRequest], - Union[ - job_service.ListModelDeploymentMonitoringJobsResponse, - Awaitable[job_service.ListModelDeploymentMonitoringJobsResponse] - ]]: - raise NotImplementedError() - - @property - def update_model_deployment_monitoring_job(self) -> Callable[ - [job_service.UpdateModelDeploymentMonitoringJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def delete_model_deployment_monitoring_job(self) -> Callable[ - [job_service.DeleteModelDeploymentMonitoringJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def pause_model_deployment_monitoring_job(self) -> Callable[ - [job_service.PauseModelDeploymentMonitoringJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def resume_model_deployment_monitoring_job(self) -> Callable[ - [job_service.ResumeModelDeploymentMonitoringJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'JobServiceTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py deleted file mode 100644 index 597e76ebda..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/grpc.py +++ /dev/null @@ -1,1045 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1.types import batch_prediction_job -from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job -from google.cloud.aiplatform_v1.types import custom_job -from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1.types import data_labeling_job -from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job -from google.cloud.aiplatform_v1.types import job_service -from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job -from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import JobServiceTransport, DEFAULT_CLIENT_INFO - - -class JobServiceGrpcTransport(JobServiceTransport): - """gRPC backend transport for JobService. 
- - A service for creating and managing Vertex AI's jobs. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
- If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. 
This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. 
- """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_custom_job(self) -> Callable[ - [job_service.CreateCustomJobRequest], - gca_custom_job.CustomJob]: - r"""Return a callable for the create custom job method over gRPC. - - Creates a CustomJob. A created CustomJob right away - will be attempted to be run. - - Returns: - Callable[[~.CreateCustomJobRequest], - ~.CustomJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_custom_job' not in self._stubs: - self._stubs['create_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CreateCustomJob', - request_serializer=job_service.CreateCustomJobRequest.serialize, - response_deserializer=gca_custom_job.CustomJob.deserialize, - ) - return self._stubs['create_custom_job'] - - @property - def get_custom_job(self) -> Callable[ - [job_service.GetCustomJobRequest], - custom_job.CustomJob]: - r"""Return a callable for the get custom job method over gRPC. - - Gets a CustomJob. - - Returns: - Callable[[~.GetCustomJobRequest], - ~.CustomJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_custom_job' not in self._stubs: - self._stubs['get_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/GetCustomJob', - request_serializer=job_service.GetCustomJobRequest.serialize, - response_deserializer=custom_job.CustomJob.deserialize, - ) - return self._stubs['get_custom_job'] - - @property - def list_custom_jobs(self) -> Callable[ - [job_service.ListCustomJobsRequest], - job_service.ListCustomJobsResponse]: - r"""Return a callable for the list custom jobs method over gRPC. - - Lists CustomJobs in a Location. - - Returns: - Callable[[~.ListCustomJobsRequest], - ~.ListCustomJobsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_custom_jobs' not in self._stubs: - self._stubs['list_custom_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/ListCustomJobs', - request_serializer=job_service.ListCustomJobsRequest.serialize, - response_deserializer=job_service.ListCustomJobsResponse.deserialize, - ) - return self._stubs['list_custom_jobs'] - - @property - def delete_custom_job(self) -> Callable[ - [job_service.DeleteCustomJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete custom job method over gRPC. - - Deletes a CustomJob. - - Returns: - Callable[[~.DeleteCustomJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_custom_job' not in self._stubs: - self._stubs['delete_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/DeleteCustomJob', - request_serializer=job_service.DeleteCustomJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_custom_job'] - - @property - def cancel_custom_job(self) -> Callable[ - [job_service.CancelCustomJobRequest], - empty_pb2.Empty]: - r"""Return a callable for the cancel custom job method over gRPC. - - Cancels a CustomJob. Starts asynchronous cancellation on the - CustomJob. The server makes a best effort to cancel the job, but - success is not guaranteed. Clients can use - [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On successful - cancellation, the CustomJob is not deleted; instead it becomes a - job with a - [CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is - set to ``CANCELLED``. - - Returns: - Callable[[~.CancelCustomJobRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'cancel_custom_job' not in self._stubs: - self._stubs['cancel_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CancelCustomJob', - request_serializer=job_service.CancelCustomJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_custom_job'] - - @property - def create_data_labeling_job(self) -> Callable[ - [job_service.CreateDataLabelingJobRequest], - gca_data_labeling_job.DataLabelingJob]: - r"""Return a callable for the create data labeling job method over gRPC. - - Creates a DataLabelingJob. - - Returns: - Callable[[~.CreateDataLabelingJobRequest], - ~.DataLabelingJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_data_labeling_job' not in self._stubs: - self._stubs['create_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CreateDataLabelingJob', - request_serializer=job_service.CreateDataLabelingJobRequest.serialize, - response_deserializer=gca_data_labeling_job.DataLabelingJob.deserialize, - ) - return self._stubs['create_data_labeling_job'] - - @property - def get_data_labeling_job(self) -> Callable[ - [job_service.GetDataLabelingJobRequest], - data_labeling_job.DataLabelingJob]: - r"""Return a callable for the get data labeling job method over gRPC. - - Gets a DataLabelingJob. - - Returns: - Callable[[~.GetDataLabelingJobRequest], - ~.DataLabelingJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_data_labeling_job' not in self._stubs: - self._stubs['get_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/GetDataLabelingJob', - request_serializer=job_service.GetDataLabelingJobRequest.serialize, - response_deserializer=data_labeling_job.DataLabelingJob.deserialize, - ) - return self._stubs['get_data_labeling_job'] - - @property - def list_data_labeling_jobs(self) -> Callable[ - [job_service.ListDataLabelingJobsRequest], - job_service.ListDataLabelingJobsResponse]: - r"""Return a callable for the list data labeling jobs method over gRPC. - - Lists DataLabelingJobs in a Location. - - Returns: - Callable[[~.ListDataLabelingJobsRequest], - ~.ListDataLabelingJobsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_data_labeling_jobs' not in self._stubs: - self._stubs['list_data_labeling_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/ListDataLabelingJobs', - request_serializer=job_service.ListDataLabelingJobsRequest.serialize, - response_deserializer=job_service.ListDataLabelingJobsResponse.deserialize, - ) - return self._stubs['list_data_labeling_jobs'] - - @property - def delete_data_labeling_job(self) -> Callable[ - [job_service.DeleteDataLabelingJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete data labeling job method over gRPC. - - Deletes a DataLabelingJob. - - Returns: - Callable[[~.DeleteDataLabelingJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_data_labeling_job' not in self._stubs: - self._stubs['delete_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/DeleteDataLabelingJob', - request_serializer=job_service.DeleteDataLabelingJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_data_labeling_job'] - - @property - def cancel_data_labeling_job(self) -> Callable[ - [job_service.CancelDataLabelingJobRequest], - empty_pb2.Empty]: - r"""Return a callable for the cancel data labeling job method over gRPC. - - Cancels a DataLabelingJob. Success of cancellation is - not guaranteed. - - Returns: - Callable[[~.CancelDataLabelingJobRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_data_labeling_job' not in self._stubs: - self._stubs['cancel_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CancelDataLabelingJob', - request_serializer=job_service.CancelDataLabelingJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_data_labeling_job'] - - @property - def create_hyperparameter_tuning_job(self) -> Callable[ - [job_service.CreateHyperparameterTuningJobRequest], - gca_hyperparameter_tuning_job.HyperparameterTuningJob]: - r"""Return a callable for the create hyperparameter tuning - job method over gRPC. - - Creates a HyperparameterTuningJob - - Returns: - Callable[[~.CreateHyperparameterTuningJobRequest], - ~.HyperparameterTuningJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_hyperparameter_tuning_job' not in self._stubs: - self._stubs['create_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CreateHyperparameterTuningJob', - request_serializer=job_service.CreateHyperparameterTuningJobRequest.serialize, - response_deserializer=gca_hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, - ) - return self._stubs['create_hyperparameter_tuning_job'] - - @property - def get_hyperparameter_tuning_job(self) -> Callable[ - [job_service.GetHyperparameterTuningJobRequest], - hyperparameter_tuning_job.HyperparameterTuningJob]: - r"""Return a callable for the get hyperparameter tuning job method over gRPC. - - Gets a HyperparameterTuningJob - - Returns: - Callable[[~.GetHyperparameterTuningJobRequest], - ~.HyperparameterTuningJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_hyperparameter_tuning_job' not in self._stubs: - self._stubs['get_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/GetHyperparameterTuningJob', - request_serializer=job_service.GetHyperparameterTuningJobRequest.serialize, - response_deserializer=hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, - ) - return self._stubs['get_hyperparameter_tuning_job'] - - @property - def list_hyperparameter_tuning_jobs(self) -> Callable[ - [job_service.ListHyperparameterTuningJobsRequest], - job_service.ListHyperparameterTuningJobsResponse]: - r"""Return a callable for the list hyperparameter tuning - jobs method over gRPC. - - Lists HyperparameterTuningJobs in a Location. 
- - Returns: - Callable[[~.ListHyperparameterTuningJobsRequest], - ~.ListHyperparameterTuningJobsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_hyperparameter_tuning_jobs' not in self._stubs: - self._stubs['list_hyperparameter_tuning_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/ListHyperparameterTuningJobs', - request_serializer=job_service.ListHyperparameterTuningJobsRequest.serialize, - response_deserializer=job_service.ListHyperparameterTuningJobsResponse.deserialize, - ) - return self._stubs['list_hyperparameter_tuning_jobs'] - - @property - def delete_hyperparameter_tuning_job(self) -> Callable[ - [job_service.DeleteHyperparameterTuningJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete hyperparameter tuning - job method over gRPC. - - Deletes a HyperparameterTuningJob. - - Returns: - Callable[[~.DeleteHyperparameterTuningJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_hyperparameter_tuning_job' not in self._stubs: - self._stubs['delete_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/DeleteHyperparameterTuningJob', - request_serializer=job_service.DeleteHyperparameterTuningJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_hyperparameter_tuning_job'] - - @property - def cancel_hyperparameter_tuning_job(self) -> Callable[ - [job_service.CancelHyperparameterTuningJobRequest], - empty_pb2.Empty]: - r"""Return a callable for the cancel hyperparameter tuning - job method over gRPC. - - Cancels a HyperparameterTuningJob. Starts asynchronous - cancellation on the HyperparameterTuningJob. The server makes a - best effort to cancel the job, but success is not guaranteed. - Clients can use - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On successful - cancellation, the HyperparameterTuningJob is not deleted; - instead it becomes a job with a - [HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state] - is set to ``CANCELLED``. - - Returns: - Callable[[~.CancelHyperparameterTuningJobRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'cancel_hyperparameter_tuning_job' not in self._stubs: - self._stubs['cancel_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CancelHyperparameterTuningJob', - request_serializer=job_service.CancelHyperparameterTuningJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_hyperparameter_tuning_job'] - - @property - def create_batch_prediction_job(self) -> Callable[ - [job_service.CreateBatchPredictionJobRequest], - gca_batch_prediction_job.BatchPredictionJob]: - r"""Return a callable for the create batch prediction job method over gRPC. - - Creates a BatchPredictionJob. A BatchPredictionJob - once created will right away be attempted to start. - - Returns: - Callable[[~.CreateBatchPredictionJobRequest], - ~.BatchPredictionJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_batch_prediction_job' not in self._stubs: - self._stubs['create_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CreateBatchPredictionJob', - request_serializer=job_service.CreateBatchPredictionJobRequest.serialize, - response_deserializer=gca_batch_prediction_job.BatchPredictionJob.deserialize, - ) - return self._stubs['create_batch_prediction_job'] - - @property - def get_batch_prediction_job(self) -> Callable[ - [job_service.GetBatchPredictionJobRequest], - batch_prediction_job.BatchPredictionJob]: - r"""Return a callable for the get batch prediction job method over gRPC. - - Gets a BatchPredictionJob - - Returns: - Callable[[~.GetBatchPredictionJobRequest], - ~.BatchPredictionJob]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_batch_prediction_job' not in self._stubs: - self._stubs['get_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/GetBatchPredictionJob', - request_serializer=job_service.GetBatchPredictionJobRequest.serialize, - response_deserializer=batch_prediction_job.BatchPredictionJob.deserialize, - ) - return self._stubs['get_batch_prediction_job'] - - @property - def list_batch_prediction_jobs(self) -> Callable[ - [job_service.ListBatchPredictionJobsRequest], - job_service.ListBatchPredictionJobsResponse]: - r"""Return a callable for the list batch prediction jobs method over gRPC. - - Lists BatchPredictionJobs in a Location. - - Returns: - Callable[[~.ListBatchPredictionJobsRequest], - ~.ListBatchPredictionJobsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_batch_prediction_jobs' not in self._stubs: - self._stubs['list_batch_prediction_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/ListBatchPredictionJobs', - request_serializer=job_service.ListBatchPredictionJobsRequest.serialize, - response_deserializer=job_service.ListBatchPredictionJobsResponse.deserialize, - ) - return self._stubs['list_batch_prediction_jobs'] - - @property - def delete_batch_prediction_job(self) -> Callable[ - [job_service.DeleteBatchPredictionJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete batch prediction job method over gRPC. - - Deletes a BatchPredictionJob. Can only be called on - jobs that already finished. 
- - Returns: - Callable[[~.DeleteBatchPredictionJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_batch_prediction_job' not in self._stubs: - self._stubs['delete_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/DeleteBatchPredictionJob', - request_serializer=job_service.DeleteBatchPredictionJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_batch_prediction_job'] - - @property - def cancel_batch_prediction_job(self) -> Callable[ - [job_service.CancelBatchPredictionJobRequest], - empty_pb2.Empty]: - r"""Return a callable for the cancel batch prediction job method over gRPC. - - Cancels a BatchPredictionJob. - - Starts asynchronous cancellation on the BatchPredictionJob. The - server makes the best effort to cancel the job, but success is - not guaranteed. Clients can use - [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On a successful - cancellation, the BatchPredictionJob is not deleted;instead its - [BatchPredictionJob.state][google.cloud.aiplatform.v1.BatchPredictionJob.state] - is set to ``CANCELLED``. Any files already outputted by the job - are not deleted. - - Returns: - Callable[[~.CancelBatchPredictionJobRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'cancel_batch_prediction_job' not in self._stubs: - self._stubs['cancel_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CancelBatchPredictionJob', - request_serializer=job_service.CancelBatchPredictionJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_batch_prediction_job'] - - @property - def create_model_deployment_monitoring_job(self) -> Callable[ - [job_service.CreateModelDeploymentMonitoringJobRequest], - gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: - r"""Return a callable for the create model deployment - monitoring job method over gRPC. - - Creates a ModelDeploymentMonitoringJob. It will run - periodically on a configured interval. - - Returns: - Callable[[~.CreateModelDeploymentMonitoringJobRequest], - ~.ModelDeploymentMonitoringJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_model_deployment_monitoring_job' not in self._stubs: - self._stubs['create_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CreateModelDeploymentMonitoringJob', - request_serializer=job_service.CreateModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, - ) - return self._stubs['create_model_deployment_monitoring_job'] - - @property - def search_model_deployment_monitoring_stats_anomalies(self) -> Callable[ - [job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest], - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: - r"""Return a callable for the search model deployment - monitoring stats anomalies method over gRPC. 
- - Searches Model Monitoring Statistics generated within - a given time window. - - Returns: - Callable[[~.SearchModelDeploymentMonitoringStatsAnomaliesRequest], - ~.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'search_model_deployment_monitoring_stats_anomalies' not in self._stubs: - self._stubs['search_model_deployment_monitoring_stats_anomalies'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/SearchModelDeploymentMonitoringStatsAnomalies', - request_serializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest.serialize, - response_deserializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse.deserialize, - ) - return self._stubs['search_model_deployment_monitoring_stats_anomalies'] - - @property - def get_model_deployment_monitoring_job(self) -> Callable[ - [job_service.GetModelDeploymentMonitoringJobRequest], - model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: - r"""Return a callable for the get model deployment - monitoring job method over gRPC. - - Gets a ModelDeploymentMonitoringJob. - - Returns: - Callable[[~.GetModelDeploymentMonitoringJobRequest], - ~.ModelDeploymentMonitoringJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_model_deployment_monitoring_job' not in self._stubs: - self._stubs['get_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/GetModelDeploymentMonitoringJob', - request_serializer=job_service.GetModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, - ) - return self._stubs['get_model_deployment_monitoring_job'] - - @property - def list_model_deployment_monitoring_jobs(self) -> Callable[ - [job_service.ListModelDeploymentMonitoringJobsRequest], - job_service.ListModelDeploymentMonitoringJobsResponse]: - r"""Return a callable for the list model deployment - monitoring jobs method over gRPC. - - Lists ModelDeploymentMonitoringJobs in a Location. - - Returns: - Callable[[~.ListModelDeploymentMonitoringJobsRequest], - ~.ListModelDeploymentMonitoringJobsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_model_deployment_monitoring_jobs' not in self._stubs: - self._stubs['list_model_deployment_monitoring_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/ListModelDeploymentMonitoringJobs', - request_serializer=job_service.ListModelDeploymentMonitoringJobsRequest.serialize, - response_deserializer=job_service.ListModelDeploymentMonitoringJobsResponse.deserialize, - ) - return self._stubs['list_model_deployment_monitoring_jobs'] - - @property - def update_model_deployment_monitoring_job(self) -> Callable[ - [job_service.UpdateModelDeploymentMonitoringJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the update model deployment - monitoring job method over gRPC. - - Updates a ModelDeploymentMonitoringJob. 
- - Returns: - Callable[[~.UpdateModelDeploymentMonitoringJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_model_deployment_monitoring_job' not in self._stubs: - self._stubs['update_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/UpdateModelDeploymentMonitoringJob', - request_serializer=job_service.UpdateModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_model_deployment_monitoring_job'] - - @property - def delete_model_deployment_monitoring_job(self) -> Callable[ - [job_service.DeleteModelDeploymentMonitoringJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete model deployment - monitoring job method over gRPC. - - Deletes a ModelDeploymentMonitoringJob. - - Returns: - Callable[[~.DeleteModelDeploymentMonitoringJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_model_deployment_monitoring_job' not in self._stubs: - self._stubs['delete_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/DeleteModelDeploymentMonitoringJob', - request_serializer=job_service.DeleteModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_model_deployment_monitoring_job'] - - @property - def pause_model_deployment_monitoring_job(self) -> Callable[ - [job_service.PauseModelDeploymentMonitoringJobRequest], - empty_pb2.Empty]: - r"""Return a callable for the pause model deployment - monitoring job method over gRPC. - - Pauses a ModelDeploymentMonitoringJob. If the job is running, - the server makes a best effort to cancel the job. Will mark - [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1.ModelDeploymentMonitoringJob.state] - to 'PAUSED'. - - Returns: - Callable[[~.PauseModelDeploymentMonitoringJobRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'pause_model_deployment_monitoring_job' not in self._stubs: - self._stubs['pause_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/PauseModelDeploymentMonitoringJob', - request_serializer=job_service.PauseModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['pause_model_deployment_monitoring_job'] - - @property - def resume_model_deployment_monitoring_job(self) -> Callable[ - [job_service.ResumeModelDeploymentMonitoringJobRequest], - empty_pb2.Empty]: - r"""Return a callable for the resume model deployment - monitoring job method over gRPC. 
- - Resumes a paused ModelDeploymentMonitoringJob. It - will start to run from next scheduled time. A deleted - ModelDeploymentMonitoringJob can't be resumed. - - Returns: - Callable[[~.ResumeModelDeploymentMonitoringJobRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'resume_model_deployment_monitoring_job' not in self._stubs: - self._stubs['resume_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/ResumeModelDeploymentMonitoringJob', - request_serializer=job_service.ResumeModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['resume_model_deployment_monitoring_job'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'JobServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py deleted file mode 100644 index ce716ce9fd..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/job_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,1049 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1.types import batch_prediction_job -from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job -from google.cloud.aiplatform_v1.types import custom_job -from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1.types import data_labeling_job -from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job -from google.cloud.aiplatform_v1.types import job_service -from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job -from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import JobServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import JobServiceGrpcTransport - - -class JobServiceGrpcAsyncIOTransport(JobServiceTransport): - """gRPC AsyncIO backend transport for JobService. - - A service for creating and managing Vertex AI's jobs. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. 
- - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. 
- """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
- If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. 
This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_custom_job(self) -> Callable[ - [job_service.CreateCustomJobRequest], - Awaitable[gca_custom_job.CustomJob]]: - r"""Return a callable for the create custom job method over gRPC. - - Creates a CustomJob. A created CustomJob right away - will be attempted to be run. - - Returns: - Callable[[~.CreateCustomJobRequest], - Awaitable[~.CustomJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_custom_job' not in self._stubs: - self._stubs['create_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CreateCustomJob', - request_serializer=job_service.CreateCustomJobRequest.serialize, - response_deserializer=gca_custom_job.CustomJob.deserialize, - ) - return self._stubs['create_custom_job'] - - @property - def get_custom_job(self) -> Callable[ - [job_service.GetCustomJobRequest], - Awaitable[custom_job.CustomJob]]: - r"""Return a callable for the get custom job method over gRPC. - - Gets a CustomJob. - - Returns: - Callable[[~.GetCustomJobRequest], - Awaitable[~.CustomJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_custom_job' not in self._stubs: - self._stubs['get_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/GetCustomJob', - request_serializer=job_service.GetCustomJobRequest.serialize, - response_deserializer=custom_job.CustomJob.deserialize, - ) - return self._stubs['get_custom_job'] - - @property - def list_custom_jobs(self) -> Callable[ - [job_service.ListCustomJobsRequest], - Awaitable[job_service.ListCustomJobsResponse]]: - r"""Return a callable for the list custom jobs method over gRPC. - - Lists CustomJobs in a Location. - - Returns: - Callable[[~.ListCustomJobsRequest], - Awaitable[~.ListCustomJobsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_custom_jobs' not in self._stubs: - self._stubs['list_custom_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/ListCustomJobs', - request_serializer=job_service.ListCustomJobsRequest.serialize, - response_deserializer=job_service.ListCustomJobsResponse.deserialize, - ) - return self._stubs['list_custom_jobs'] - - @property - def delete_custom_job(self) -> Callable[ - [job_service.DeleteCustomJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete custom job method over gRPC. - - Deletes a CustomJob. - - Returns: - Callable[[~.DeleteCustomJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_custom_job' not in self._stubs: - self._stubs['delete_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/DeleteCustomJob', - request_serializer=job_service.DeleteCustomJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_custom_job'] - - @property - def cancel_custom_job(self) -> Callable[ - [job_service.CancelCustomJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the cancel custom job method over gRPC. - - Cancels a CustomJob. Starts asynchronous cancellation on the - CustomJob. The server makes a best effort to cancel the job, but - success is not guaranteed. Clients can use - [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. 
On successful - cancellation, the CustomJob is not deleted; instead it becomes a - job with a - [CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is - set to ``CANCELLED``. - - Returns: - Callable[[~.CancelCustomJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_custom_job' not in self._stubs: - self._stubs['cancel_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CancelCustomJob', - request_serializer=job_service.CancelCustomJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_custom_job'] - - @property - def create_data_labeling_job(self) -> Callable[ - [job_service.CreateDataLabelingJobRequest], - Awaitable[gca_data_labeling_job.DataLabelingJob]]: - r"""Return a callable for the create data labeling job method over gRPC. - - Creates a DataLabelingJob. - - Returns: - Callable[[~.CreateDataLabelingJobRequest], - Awaitable[~.DataLabelingJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_data_labeling_job' not in self._stubs: - self._stubs['create_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CreateDataLabelingJob', - request_serializer=job_service.CreateDataLabelingJobRequest.serialize, - response_deserializer=gca_data_labeling_job.DataLabelingJob.deserialize, - ) - return self._stubs['create_data_labeling_job'] - - @property - def get_data_labeling_job(self) -> Callable[ - [job_service.GetDataLabelingJobRequest], - Awaitable[data_labeling_job.DataLabelingJob]]: - r"""Return a callable for the get data labeling job method over gRPC. - - Gets a DataLabelingJob. - - Returns: - Callable[[~.GetDataLabelingJobRequest], - Awaitable[~.DataLabelingJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_data_labeling_job' not in self._stubs: - self._stubs['get_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/GetDataLabelingJob', - request_serializer=job_service.GetDataLabelingJobRequest.serialize, - response_deserializer=data_labeling_job.DataLabelingJob.deserialize, - ) - return self._stubs['get_data_labeling_job'] - - @property - def list_data_labeling_jobs(self) -> Callable[ - [job_service.ListDataLabelingJobsRequest], - Awaitable[job_service.ListDataLabelingJobsResponse]]: - r"""Return a callable for the list data labeling jobs method over gRPC. - - Lists DataLabelingJobs in a Location. - - Returns: - Callable[[~.ListDataLabelingJobsRequest], - Awaitable[~.ListDataLabelingJobsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_data_labeling_jobs' not in self._stubs: - self._stubs['list_data_labeling_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/ListDataLabelingJobs', - request_serializer=job_service.ListDataLabelingJobsRequest.serialize, - response_deserializer=job_service.ListDataLabelingJobsResponse.deserialize, - ) - return self._stubs['list_data_labeling_jobs'] - - @property - def delete_data_labeling_job(self) -> Callable[ - [job_service.DeleteDataLabelingJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete data labeling job method over gRPC. - - Deletes a DataLabelingJob. - - Returns: - Callable[[~.DeleteDataLabelingJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_data_labeling_job' not in self._stubs: - self._stubs['delete_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/DeleteDataLabelingJob', - request_serializer=job_service.DeleteDataLabelingJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_data_labeling_job'] - - @property - def cancel_data_labeling_job(self) -> Callable[ - [job_service.CancelDataLabelingJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the cancel data labeling job method over gRPC. - - Cancels a DataLabelingJob. Success of cancellation is - not guaranteed. - - Returns: - Callable[[~.CancelDataLabelingJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_data_labeling_job' not in self._stubs: - self._stubs['cancel_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CancelDataLabelingJob', - request_serializer=job_service.CancelDataLabelingJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_data_labeling_job'] - - @property - def create_hyperparameter_tuning_job(self) -> Callable[ - [job_service.CreateHyperparameterTuningJobRequest], - Awaitable[gca_hyperparameter_tuning_job.HyperparameterTuningJob]]: - r"""Return a callable for the create hyperparameter tuning - job method over gRPC. - - Creates a HyperparameterTuningJob - - Returns: - Callable[[~.CreateHyperparameterTuningJobRequest], - Awaitable[~.HyperparameterTuningJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_hyperparameter_tuning_job' not in self._stubs: - self._stubs['create_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CreateHyperparameterTuningJob', - request_serializer=job_service.CreateHyperparameterTuningJobRequest.serialize, - response_deserializer=gca_hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, - ) - return self._stubs['create_hyperparameter_tuning_job'] - - @property - def get_hyperparameter_tuning_job(self) -> Callable[ - [job_service.GetHyperparameterTuningJobRequest], - Awaitable[hyperparameter_tuning_job.HyperparameterTuningJob]]: - r"""Return a callable for the get hyperparameter tuning job method over gRPC. 
- - Gets a HyperparameterTuningJob - - Returns: - Callable[[~.GetHyperparameterTuningJobRequest], - Awaitable[~.HyperparameterTuningJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_hyperparameter_tuning_job' not in self._stubs: - self._stubs['get_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/GetHyperparameterTuningJob', - request_serializer=job_service.GetHyperparameterTuningJobRequest.serialize, - response_deserializer=hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, - ) - return self._stubs['get_hyperparameter_tuning_job'] - - @property - def list_hyperparameter_tuning_jobs(self) -> Callable[ - [job_service.ListHyperparameterTuningJobsRequest], - Awaitable[job_service.ListHyperparameterTuningJobsResponse]]: - r"""Return a callable for the list hyperparameter tuning - jobs method over gRPC. - - Lists HyperparameterTuningJobs in a Location. - - Returns: - Callable[[~.ListHyperparameterTuningJobsRequest], - Awaitable[~.ListHyperparameterTuningJobsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_hyperparameter_tuning_jobs' not in self._stubs: - self._stubs['list_hyperparameter_tuning_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/ListHyperparameterTuningJobs', - request_serializer=job_service.ListHyperparameterTuningJobsRequest.serialize, - response_deserializer=job_service.ListHyperparameterTuningJobsResponse.deserialize, - ) - return self._stubs['list_hyperparameter_tuning_jobs'] - - @property - def delete_hyperparameter_tuning_job(self) -> Callable[ - [job_service.DeleteHyperparameterTuningJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete hyperparameter tuning - job method over gRPC. - - Deletes a HyperparameterTuningJob. - - Returns: - Callable[[~.DeleteHyperparameterTuningJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_hyperparameter_tuning_job' not in self._stubs: - self._stubs['delete_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/DeleteHyperparameterTuningJob', - request_serializer=job_service.DeleteHyperparameterTuningJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_hyperparameter_tuning_job'] - - @property - def cancel_hyperparameter_tuning_job(self) -> Callable[ - [job_service.CancelHyperparameterTuningJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the cancel hyperparameter tuning - job method over gRPC. - - Cancels a HyperparameterTuningJob. Starts asynchronous - cancellation on the HyperparameterTuningJob. The server makes a - best effort to cancel the job, but success is not guaranteed. 
- Clients can use - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On successful - cancellation, the HyperparameterTuningJob is not deleted; - instead it becomes a job with a - [HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state] - is set to ``CANCELLED``. - - Returns: - Callable[[~.CancelHyperparameterTuningJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_hyperparameter_tuning_job' not in self._stubs: - self._stubs['cancel_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CancelHyperparameterTuningJob', - request_serializer=job_service.CancelHyperparameterTuningJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_hyperparameter_tuning_job'] - - @property - def create_batch_prediction_job(self) -> Callable[ - [job_service.CreateBatchPredictionJobRequest], - Awaitable[gca_batch_prediction_job.BatchPredictionJob]]: - r"""Return a callable for the create batch prediction job method over gRPC. - - Creates a BatchPredictionJob. A BatchPredictionJob - once created will right away be attempted to start. - - Returns: - Callable[[~.CreateBatchPredictionJobRequest], - Awaitable[~.BatchPredictionJob]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_batch_prediction_job' not in self._stubs: - self._stubs['create_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CreateBatchPredictionJob', - request_serializer=job_service.CreateBatchPredictionJobRequest.serialize, - response_deserializer=gca_batch_prediction_job.BatchPredictionJob.deserialize, - ) - return self._stubs['create_batch_prediction_job'] - - @property - def get_batch_prediction_job(self) -> Callable[ - [job_service.GetBatchPredictionJobRequest], - Awaitable[batch_prediction_job.BatchPredictionJob]]: - r"""Return a callable for the get batch prediction job method over gRPC. - - Gets a BatchPredictionJob - - Returns: - Callable[[~.GetBatchPredictionJobRequest], - Awaitable[~.BatchPredictionJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_batch_prediction_job' not in self._stubs: - self._stubs['get_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/GetBatchPredictionJob', - request_serializer=job_service.GetBatchPredictionJobRequest.serialize, - response_deserializer=batch_prediction_job.BatchPredictionJob.deserialize, - ) - return self._stubs['get_batch_prediction_job'] - - @property - def list_batch_prediction_jobs(self) -> Callable[ - [job_service.ListBatchPredictionJobsRequest], - Awaitable[job_service.ListBatchPredictionJobsResponse]]: - r"""Return a callable for the list batch prediction jobs method over gRPC. - - Lists BatchPredictionJobs in a Location. 
- - Returns: - Callable[[~.ListBatchPredictionJobsRequest], - Awaitable[~.ListBatchPredictionJobsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_batch_prediction_jobs' not in self._stubs: - self._stubs['list_batch_prediction_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/ListBatchPredictionJobs', - request_serializer=job_service.ListBatchPredictionJobsRequest.serialize, - response_deserializer=job_service.ListBatchPredictionJobsResponse.deserialize, - ) - return self._stubs['list_batch_prediction_jobs'] - - @property - def delete_batch_prediction_job(self) -> Callable[ - [job_service.DeleteBatchPredictionJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete batch prediction job method over gRPC. - - Deletes a BatchPredictionJob. Can only be called on - jobs that already finished. - - Returns: - Callable[[~.DeleteBatchPredictionJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_batch_prediction_job' not in self._stubs: - self._stubs['delete_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/DeleteBatchPredictionJob', - request_serializer=job_service.DeleteBatchPredictionJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_batch_prediction_job'] - - @property - def cancel_batch_prediction_job(self) -> Callable[ - [job_service.CancelBatchPredictionJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the cancel batch prediction job method over gRPC. - - Cancels a BatchPredictionJob. - - Starts asynchronous cancellation on the BatchPredictionJob. The - server makes the best effort to cancel the job, but success is - not guaranteed. Clients can use - [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On a successful - cancellation, the BatchPredictionJob is not deleted;instead its - [BatchPredictionJob.state][google.cloud.aiplatform.v1.BatchPredictionJob.state] - is set to ``CANCELLED``. Any files already outputted by the job - are not deleted. - - Returns: - Callable[[~.CancelBatchPredictionJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'cancel_batch_prediction_job' not in self._stubs: - self._stubs['cancel_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CancelBatchPredictionJob', - request_serializer=job_service.CancelBatchPredictionJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_batch_prediction_job'] - - @property - def create_model_deployment_monitoring_job(self) -> Callable[ - [job_service.CreateModelDeploymentMonitoringJobRequest], - Awaitable[gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob]]: - r"""Return a callable for the create model deployment - monitoring job method over gRPC. - - Creates a ModelDeploymentMonitoringJob. It will run - periodically on a configured interval. - - Returns: - Callable[[~.CreateModelDeploymentMonitoringJobRequest], - Awaitable[~.ModelDeploymentMonitoringJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_model_deployment_monitoring_job' not in self._stubs: - self._stubs['create_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/CreateModelDeploymentMonitoringJob', - request_serializer=job_service.CreateModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, - ) - return self._stubs['create_model_deployment_monitoring_job'] - - @property - def search_model_deployment_monitoring_stats_anomalies(self) -> Callable[ - [job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest], - Awaitable[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]]: - r"""Return a callable for the search model deployment - monitoring stats anomalies method over gRPC. - - Searches Model Monitoring Statistics generated within - a given time window. - - Returns: - Callable[[~.SearchModelDeploymentMonitoringStatsAnomaliesRequest], - Awaitable[~.SearchModelDeploymentMonitoringStatsAnomaliesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'search_model_deployment_monitoring_stats_anomalies' not in self._stubs: - self._stubs['search_model_deployment_monitoring_stats_anomalies'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/SearchModelDeploymentMonitoringStatsAnomalies', - request_serializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest.serialize, - response_deserializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse.deserialize, - ) - return self._stubs['search_model_deployment_monitoring_stats_anomalies'] - - @property - def get_model_deployment_monitoring_job(self) -> Callable[ - [job_service.GetModelDeploymentMonitoringJobRequest], - Awaitable[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]]: - r"""Return a callable for the get model deployment - monitoring job method over gRPC. - - Gets a ModelDeploymentMonitoringJob. - - Returns: - Callable[[~.GetModelDeploymentMonitoringJobRequest], - Awaitable[~.ModelDeploymentMonitoringJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_model_deployment_monitoring_job' not in self._stubs: - self._stubs['get_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/GetModelDeploymentMonitoringJob', - request_serializer=job_service.GetModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, - ) - return self._stubs['get_model_deployment_monitoring_job'] - - @property - def list_model_deployment_monitoring_jobs(self) -> Callable[ - [job_service.ListModelDeploymentMonitoringJobsRequest], - Awaitable[job_service.ListModelDeploymentMonitoringJobsResponse]]: - r"""Return a callable for the list model deployment - monitoring jobs method over gRPC. - - Lists ModelDeploymentMonitoringJobs in a Location. - - Returns: - Callable[[~.ListModelDeploymentMonitoringJobsRequest], - Awaitable[~.ListModelDeploymentMonitoringJobsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_model_deployment_monitoring_jobs' not in self._stubs: - self._stubs['list_model_deployment_monitoring_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/ListModelDeploymentMonitoringJobs', - request_serializer=job_service.ListModelDeploymentMonitoringJobsRequest.serialize, - response_deserializer=job_service.ListModelDeploymentMonitoringJobsResponse.deserialize, - ) - return self._stubs['list_model_deployment_monitoring_jobs'] - - @property - def update_model_deployment_monitoring_job(self) -> Callable[ - [job_service.UpdateModelDeploymentMonitoringJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the update model deployment - monitoring job method over gRPC. 
- - Updates a ModelDeploymentMonitoringJob. - - Returns: - Callable[[~.UpdateModelDeploymentMonitoringJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_model_deployment_monitoring_job' not in self._stubs: - self._stubs['update_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/UpdateModelDeploymentMonitoringJob', - request_serializer=job_service.UpdateModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_model_deployment_monitoring_job'] - - @property - def delete_model_deployment_monitoring_job(self) -> Callable[ - [job_service.DeleteModelDeploymentMonitoringJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete model deployment - monitoring job method over gRPC. - - Deletes a ModelDeploymentMonitoringJob. - - Returns: - Callable[[~.DeleteModelDeploymentMonitoringJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_model_deployment_monitoring_job' not in self._stubs: - self._stubs['delete_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/DeleteModelDeploymentMonitoringJob', - request_serializer=job_service.DeleteModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_model_deployment_monitoring_job'] - - @property - def pause_model_deployment_monitoring_job(self) -> Callable[ - [job_service.PauseModelDeploymentMonitoringJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the pause model deployment - monitoring job method over gRPC. - - Pauses a ModelDeploymentMonitoringJob. If the job is running, - the server makes a best effort to cancel the job. Will mark - [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1.ModelDeploymentMonitoringJob.state] - to 'PAUSED'. - - Returns: - Callable[[~.PauseModelDeploymentMonitoringJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'pause_model_deployment_monitoring_job' not in self._stubs: - self._stubs['pause_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/PauseModelDeploymentMonitoringJob', - request_serializer=job_service.PauseModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['pause_model_deployment_monitoring_job'] - - @property - def resume_model_deployment_monitoring_job(self) -> Callable[ - [job_service.ResumeModelDeploymentMonitoringJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the resume model deployment - monitoring job method over gRPC. - - Resumes a paused ModelDeploymentMonitoringJob. It - will start to run from next scheduled time. A deleted - ModelDeploymentMonitoringJob can't be resumed. - - Returns: - Callable[[~.ResumeModelDeploymentMonitoringJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'resume_model_deployment_monitoring_job' not in self._stubs: - self._stubs['resume_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.JobService/ResumeModelDeploymentMonitoringJob', - request_serializer=job_service.ResumeModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['resume_model_deployment_monitoring_job'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'JobServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/__init__.py deleted file mode 100644 index b0a31fc612..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import MetadataServiceClient -from .async_client import MetadataServiceAsyncClient - -__all__ = ( - 'MetadataServiceClient', - 'MetadataServiceAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/async_client.py deleted file mode 100644 index 618e9a9b7c..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/async_client.py +++ /dev/null @@ -1,2980 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.metadata_service import pagers -from google.cloud.aiplatform_v1.types import artifact -from google.cloud.aiplatform_v1.types import artifact as gca_artifact -from google.cloud.aiplatform_v1.types import context -from google.cloud.aiplatform_v1.types import context as gca_context -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import event -from google.cloud.aiplatform_v1.types import execution -from google.cloud.aiplatform_v1.types import execution as gca_execution -from google.cloud.aiplatform_v1.types import lineage_subgraph -from google.cloud.aiplatform_v1.types import metadata_schema -from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema -from google.cloud.aiplatform_v1.types import metadata_service -from google.cloud.aiplatform_v1.types import metadata_store -from google.cloud.aiplatform_v1.types import metadata_store as gca_metadata_store -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import 
struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import MetadataServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import MetadataServiceGrpcAsyncIOTransport -from .client import MetadataServiceClient - - -class MetadataServiceAsyncClient: - """Service for reading and writing metadata entries.""" - - _client: MetadataServiceClient - - DEFAULT_ENDPOINT = MetadataServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = MetadataServiceClient.DEFAULT_MTLS_ENDPOINT - - artifact_path = staticmethod(MetadataServiceClient.artifact_path) - parse_artifact_path = staticmethod(MetadataServiceClient.parse_artifact_path) - context_path = staticmethod(MetadataServiceClient.context_path) - parse_context_path = staticmethod(MetadataServiceClient.parse_context_path) - execution_path = staticmethod(MetadataServiceClient.execution_path) - parse_execution_path = staticmethod(MetadataServiceClient.parse_execution_path) - metadata_schema_path = staticmethod(MetadataServiceClient.metadata_schema_path) - parse_metadata_schema_path = staticmethod(MetadataServiceClient.parse_metadata_schema_path) - metadata_store_path = staticmethod(MetadataServiceClient.metadata_store_path) - parse_metadata_store_path = staticmethod(MetadataServiceClient.parse_metadata_store_path) - common_billing_account_path = staticmethod(MetadataServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(MetadataServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(MetadataServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(MetadataServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(MetadataServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(MetadataServiceClient.parse_common_organization_path) - common_project_path = staticmethod(MetadataServiceClient.common_project_path) - 
parse_common_project_path = staticmethod(MetadataServiceClient.parse_common_project_path) - common_location_path = staticmethod(MetadataServiceClient.common_location_path) - parse_common_location_path = staticmethod(MetadataServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - MetadataServiceAsyncClient: The constructed client. - """ - return MetadataServiceClient.from_service_account_info.__func__(MetadataServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - MetadataServiceAsyncClient: The constructed client. - """ - return MetadataServiceClient.from_service_account_file.__func__(MetadataServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> MetadataServiceTransport: - """Returns the transport used by the client instance. - - Returns: - MetadataServiceTransport: The transport used by the client instance. 
- """ - return self._client.transport - - get_transport_class = functools.partial(type(MetadataServiceClient).get_transport_class, type(MetadataServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, MetadataServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the metadata service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.MetadataServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. 
- - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = MetadataServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_metadata_store(self, - request: Union[metadata_service.CreateMetadataStoreRequest, dict] = None, - *, - parent: str = None, - metadata_store: gca_metadata_store.MetadataStore = None, - metadata_store_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Initializes a MetadataStore, including allocation of - resources. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateMetadataStoreRequest, dict]): - The request object. Request message for - [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1.MetadataService.CreateMetadataStore]. - parent (:class:`str`): - Required. The resource name of the Location where the - MetadataStore should be created. Format: - ``projects/{project}/locations/{location}/`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - metadata_store (:class:`google.cloud.aiplatform_v1.types.MetadataStore`): - Required. The MetadataStore to - create. - - This corresponds to the ``metadata_store`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - metadata_store_id (:class:`str`): - The {metadatastore} portion of the resource name with - the format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - If not provided, the MetadataStore's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are ``/[a-z][0-9]-/``. Must be - unique across all MetadataStores in the parent Location. 
- (Otherwise the request will fail with ALREADY_EXISTS, or - PERMISSION_DENIED if the caller can't view the - preexisting MetadataStore.) - - This corresponds to the ``metadata_store_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.MetadataStore` Instance of a metadata store. Contains a set of metadata that can be - queried. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, metadata_store, metadata_store_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.CreateMetadataStoreRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if metadata_store is not None: - request.metadata_store = metadata_store - if metadata_store_id is not None: - request.metadata_store_id = metadata_store_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_metadata_store, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_metadata_store.MetadataStore, - metadata_type=metadata_service.CreateMetadataStoreOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_metadata_store(self, - request: Union[metadata_service.GetMetadataStoreRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_store.MetadataStore: - r"""Retrieves a specific MetadataStore. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetMetadataStoreRequest, dict]): - The request object. Request message for - [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1.MetadataService.GetMetadataStore]. - name (:class:`str`): - Required. The resource name of the MetadataStore to - retrieve. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.MetadataStore: - Instance of a metadata store. - Contains a set of metadata that can be - queried. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.GetMetadataStoreRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_metadata_store, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_metadata_stores(self, - request: Union[metadata_service.ListMetadataStoresRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListMetadataStoresAsyncPager: - r"""Lists MetadataStores for a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListMetadataStoresRequest, dict]): - The request object. Request message for - [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1.MetadataService.ListMetadataStores]. - parent (:class:`str`): - Required. The Location whose MetadataStores should be - listed. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataStoresAsyncPager: - Response message for - [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1.MetadataService.ListMetadataStores]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.ListMetadataStoresRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_metadata_stores, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListMetadataStoresAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def delete_metadata_store(self, - request: Union[metadata_service.DeleteMetadataStoreRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a single MetadataStore and all its child - resources (Artifacts, Executions, and Contexts). - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteMetadataStoreRequest, dict]): - The request object. Request message for - [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1.MetadataService.DeleteMetadataStore]. - name (:class:`str`): - Required. The resource name of the MetadataStore to - delete. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.DeleteMetadataStoreRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_metadata_store, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=metadata_service.DeleteMetadataStoreOperationMetadata, - ) - - # Done; return the response. - return response - - async def create_artifact(self, - request: Union[metadata_service.CreateArtifactRequest, dict] = None, - *, - parent: str = None, - artifact: gca_artifact.Artifact = None, - artifact_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_artifact.Artifact: - r"""Creates an Artifact associated with a MetadataStore. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateArtifactRequest, dict]): - The request object. Request message for - [MetadataService.CreateArtifact][google.cloud.aiplatform.v1.MetadataService.CreateArtifact]. - parent (:class:`str`): - Required. 
The resource name of the MetadataStore where - the Artifact should be created. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - artifact (:class:`google.cloud.aiplatform_v1.types.Artifact`): - Required. The Artifact to create. - This corresponds to the ``artifact`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - artifact_id (:class:`str`): - The {artifact} portion of the resource name with the - format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - If not provided, the Artifact's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are ``/[a-z][0-9]-/``. Must be - unique across all Artifacts in the parent MetadataStore. - (Otherwise the request will fail with ALREADY_EXISTS, or - PERMISSION_DENIED if the caller can't view the - preexisting Artifact.) - - This corresponds to the ``artifact_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Artifact: - Instance of a general artifact. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, artifact, artifact_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.CreateArtifactRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if artifact is not None: - request.artifact = artifact - if artifact_id is not None: - request.artifact_id = artifact_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_artifact, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_artifact(self, - request: Union[metadata_service.GetArtifactRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> artifact.Artifact: - r"""Retrieves a specific Artifact. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetArtifactRequest, dict]): - The request object. Request message for - [MetadataService.GetArtifact][google.cloud.aiplatform.v1.MetadataService.GetArtifact]. - name (:class:`str`): - Required. The resource name of the Artifact to retrieve. 
- Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Artifact: - Instance of a general artifact. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.GetArtifactRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_artifact, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def list_artifacts(self, - request: Union[metadata_service.ListArtifactsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListArtifactsAsyncPager: - r"""Lists Artifacts in the MetadataStore. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListArtifactsRequest, dict]): - The request object. Request message for - [MetadataService.ListArtifacts][google.cloud.aiplatform.v1.MetadataService.ListArtifacts]. - parent (:class:`str`): - Required. The MetadataStore whose Artifacts should be - listed. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.metadata_service.pagers.ListArtifactsAsyncPager: - Response message for - [MetadataService.ListArtifacts][google.cloud.aiplatform.v1.MetadataService.ListArtifacts]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.ListArtifactsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_artifacts, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListArtifactsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_artifact(self, - request: Union[metadata_service.UpdateArtifactRequest, dict] = None, - *, - artifact: gca_artifact.Artifact = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_artifact.Artifact: - r"""Updates a stored Artifact. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateArtifactRequest, dict]): - The request object. Request message for - [MetadataService.UpdateArtifact][google.cloud.aiplatform.v1.MetadataService.UpdateArtifact]. - artifact (:class:`google.cloud.aiplatform_v1.types.Artifact`): - Required. The Artifact containing updates. The - Artifact's - [Artifact.name][google.cloud.aiplatform.v1.Artifact.name] - field is used to identify the Artifact to be updated. 
- Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - - This corresponds to the ``artifact`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. A FieldMask indicating - which fields should be updated. - Functionality of this field is not yet - supported. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Artifact: - Instance of a general artifact. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([artifact, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.UpdateArtifactRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if artifact is not None: - request.artifact = artifact - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_artifact, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("artifact.name", request.artifact.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_artifact(self, - request: Union[metadata_service.DeleteArtifactRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes an Artifact. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteArtifactRequest, dict]): - The request object. Request message for - [MetadataService.DeleteArtifact][google.cloud.aiplatform.v1.MetadataService.DeleteArtifact]. - name (:class:`str`): - Required. The resource name of the Artifact to delete. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. 
- - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.DeleteArtifactRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_artifact, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def purge_artifacts(self, - request: Union[metadata_service.PurgeArtifactsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Purges Artifacts. - - Args: - request (Union[google.cloud.aiplatform_v1.types.PurgeArtifactsRequest, dict]): - The request object. Request message for - [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts]. 
- parent (:class:`str`): - Required. The metadata store to purge Artifacts from. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.PurgeArtifactsResponse` - Response message for - [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.PurgeArtifactsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.purge_artifacts, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - metadata_service.PurgeArtifactsResponse, - metadata_type=metadata_service.PurgeArtifactsMetadata, - ) - - # Done; return the response. - return response - - async def create_context(self, - request: Union[metadata_service.CreateContextRequest, dict] = None, - *, - parent: str = None, - context: gca_context.Context = None, - context_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_context.Context: - r"""Creates a Context associated with a MetadataStore. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateContextRequest, dict]): - The request object. Request message for - [MetadataService.CreateContext][google.cloud.aiplatform.v1.MetadataService.CreateContext]. - parent (:class:`str`): - Required. The resource name of the MetadataStore where - the Context should be created. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - context (:class:`google.cloud.aiplatform_v1.types.Context`): - Required. The Context to create. - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - context_id (:class:`str`): - The {context} portion of the resource name with the - format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}``. - If not provided, the Context's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are ``/[a-z][0-9]-/``. Must be - unique across all Contexts in the parent MetadataStore. 
- (Otherwise the request will fail with ALREADY_EXISTS, or - PERMISSION_DENIED if the caller can't view the - preexisting Context.) - - This corresponds to the ``context_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Context: - Instance of a general context. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, context, context_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.CreateContextRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if context is not None: - request.context = context - if context_id is not None: - request.context_id = context_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_context, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def get_context(self, - request: Union[metadata_service.GetContextRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> context.Context: - r"""Retrieves a specific Context. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetContextRequest, dict]): - The request object. Request message for - [MetadataService.GetContext][google.cloud.aiplatform.v1.MetadataService.GetContext]. - name (:class:`str`): - Required. The resource name of the Context to retrieve. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Context: - Instance of a general context. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.GetContextRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_context, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_contexts(self, - request: Union[metadata_service.ListContextsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListContextsAsyncPager: - r"""Lists Contexts on the MetadataStore. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListContextsRequest, dict]): - The request object. Request message for - [MetadataService.ListContexts][google.cloud.aiplatform.v1.MetadataService.ListContexts] - parent (:class:`str`): - Required. The MetadataStore whose Contexts should be - listed. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.metadata_service.pagers.ListContextsAsyncPager: - Response message for - [MetadataService.ListContexts][google.cloud.aiplatform.v1.MetadataService.ListContexts]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.ListContextsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_contexts, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListContextsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_context(self, - request: Union[metadata_service.UpdateContextRequest, dict] = None, - *, - context: gca_context.Context = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_context.Context: - r"""Updates a stored Context. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateContextRequest, dict]): - The request object. Request message for - [MetadataService.UpdateContext][google.cloud.aiplatform.v1.MetadataService.UpdateContext]. 
- context (:class:`google.cloud.aiplatform_v1.types.Context`): - Required. The Context containing updates. The Context's - [Context.name][google.cloud.aiplatform.v1.Context.name] - field is used to identify the Context to be updated. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. A FieldMask indicating - which fields should be updated. - Functionality of this field is not yet - supported. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Context: - Instance of a general context. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([context, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.UpdateContextRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if context is not None: - request.context = context - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_context, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("context.name", request.context.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_context(self, - request: Union[metadata_service.DeleteContextRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a stored Context. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteContextRequest, dict]): - The request object. Request message for - [MetadataService.DeleteContext][google.cloud.aiplatform.v1.MetadataService.DeleteContext]. - name (:class:`str`): - Required. The resource name of the Context to delete. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. 
A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.DeleteContextRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_context, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
- return response - - async def purge_contexts(self, - request: Union[metadata_service.PurgeContextsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Purges Contexts. - - Args: - request (Union[google.cloud.aiplatform_v1.types.PurgeContextsRequest, dict]): - The request object. Request message for - [MetadataService.PurgeContexts][google.cloud.aiplatform.v1.MetadataService.PurgeContexts]. - parent (:class:`str`): - Required. The metadata store to purge Contexts from. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.PurgeContextsResponse` - Response message for - [MetadataService.PurgeContexts][google.cloud.aiplatform.v1.MetadataService.PurgeContexts]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.PurgeContextsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.purge_contexts, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - metadata_service.PurgeContextsResponse, - metadata_type=metadata_service.PurgeContextsMetadata, - ) - - # Done; return the response. - return response - - async def add_context_artifacts_and_executions(self, - request: Union[metadata_service.AddContextArtifactsAndExecutionsRequest, dict] = None, - *, - context: str = None, - artifacts: Sequence[str] = None, - executions: Sequence[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_service.AddContextArtifactsAndExecutionsResponse: - r"""Adds a set of Artifacts and Executions to a Context. - If any of the Artifacts or Executions have already been - added to a Context, they are simply skipped. - - Args: - request (Union[google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsRequest, dict]): - The request object. Request message for - [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions]. - context (:class:`str`): - Required. The resource name of the Context that the - Artifacts and Executions belong to. 
Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - artifacts (:class:`Sequence[str]`): - The resource names of the Artifacts to attribute to the - Context. - - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - - This corresponds to the ``artifacts`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - executions (:class:`Sequence[str]`): - The resource names of the Executions to associate with - the Context. - - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - - This corresponds to the ``executions`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsResponse: - Response message for - [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([context, artifacts, executions]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.AddContextArtifactsAndExecutionsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if context is not None: - request.context = context - if artifacts: - request.artifacts.extend(artifacts) - if executions: - request.executions.extend(executions) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.add_context_artifacts_and_executions, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("context", request.context), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def add_context_children(self, - request: Union[metadata_service.AddContextChildrenRequest, dict] = None, - *, - context: str = None, - child_contexts: Sequence[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_service.AddContextChildrenResponse: - r"""Adds a set of Contexts as children to a parent Context. If any - of the child Contexts have already been added to the parent - Context, they are simply skipped. If this call would create a - cycle or cause any Context to have more than 10 parents, the - request will fail with an INVALID_ARGUMENT error. 
- - Args: - request (Union[google.cloud.aiplatform_v1.types.AddContextChildrenRequest, dict]): - The request object. Request message for - [MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren]. - context (:class:`str`): - Required. The resource name of the parent Context. - - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - child_contexts (:class:`Sequence[str]`): - The resource names of the child - Contexts. - - This corresponds to the ``child_contexts`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.AddContextChildrenResponse: - Response message for - [MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([context, child_contexts]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.AddContextChildrenRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if context is not None: - request.context = context - if child_contexts: - request.child_contexts.extend(child_contexts) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.add_context_children, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("context", request.context), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def query_context_lineage_subgraph(self, - request: Union[metadata_service.QueryContextLineageSubgraphRequest, dict] = None, - *, - context: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> lineage_subgraph.LineageSubgraph: - r"""Retrieves Artifacts and Executions within the - specified Context, connected by Event edges and returned - as a LineageSubgraph. - - Args: - request (Union[google.cloud.aiplatform_v1.types.QueryContextLineageSubgraphRequest, dict]): - The request object. Request message for - [MetadataService.QueryContextLineageSubgraph][google.cloud.aiplatform.v1.MetadataService.QueryContextLineageSubgraph]. - context (:class:`str`): - Required. The resource name of the Context whose - Artifacts and Executions should be retrieved as a - LineageSubgraph. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - - The request may error with FAILED_PRECONDITION if the - number of Artifacts, the number of Executions, or the - number of Events that would be returned for the Context - exceeds 1000. 
- - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.LineageSubgraph: - A subgraph of the overall lineage - graph. Event edges connect Artifact and - Execution nodes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([context]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.QueryContextLineageSubgraphRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if context is not None: - request.context = context - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.query_context_lineage_subgraph, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("context", request.context), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def create_execution(self, - request: Union[metadata_service.CreateExecutionRequest, dict] = None, - *, - parent: str = None, - execution: gca_execution.Execution = None, - execution_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_execution.Execution: - r"""Creates an Execution associated with a MetadataStore. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateExecutionRequest, dict]): - The request object. Request message for - [MetadataService.CreateExecution][google.cloud.aiplatform.v1.MetadataService.CreateExecution]. - parent (:class:`str`): - Required. The resource name of the MetadataStore where - the Execution should be created. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - execution (:class:`google.cloud.aiplatform_v1.types.Execution`): - Required. The Execution to create. - This corresponds to the ``execution`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - execution_id (:class:`str`): - The {execution} portion of the resource name with the - format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - If not provided, the Execution's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are ``/[a-z][0-9]-/``. Must be - unique across all Executions in the parent - MetadataStore. (Otherwise the request will fail with - ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't - view the preexisting Execution.) - - This corresponds to the ``execution_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Execution: - Instance of a general execution. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, execution, execution_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.CreateExecutionRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if execution is not None: - request.execution = execution - if execution_id is not None: - request.execution_id = execution_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_execution, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def get_execution(self, - request: Union[metadata_service.GetExecutionRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> execution.Execution: - r"""Retrieves a specific Execution. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetExecutionRequest, dict]): - The request object. Request message for - [MetadataService.GetExecution][google.cloud.aiplatform.v1.MetadataService.GetExecution]. - name (:class:`str`): - Required. The resource name of the Execution to - retrieve. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Execution: - Instance of a general execution. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.GetExecutionRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_execution, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_executions(self, - request: Union[metadata_service.ListExecutionsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListExecutionsAsyncPager: - r"""Lists Executions in the MetadataStore. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListExecutionsRequest, dict]): - The request object. Request message for - [MetadataService.ListExecutions][google.cloud.aiplatform.v1.MetadataService.ListExecutions]. - parent (:class:`str`): - Required. The MetadataStore whose Executions should be - listed. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.metadata_service.pagers.ListExecutionsAsyncPager: - Response message for - [MetadataService.ListExecutions][google.cloud.aiplatform.v1.MetadataService.ListExecutions]. - - Iterating over this object will yield results and - resolve additional pages automatically. 
- - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.ListExecutionsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_executions, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListExecutionsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_execution(self, - request: Union[metadata_service.UpdateExecutionRequest, dict] = None, - *, - execution: gca_execution.Execution = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_execution.Execution: - r"""Updates a stored Execution. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateExecutionRequest, dict]): - The request object. 
Request message for - [MetadataService.UpdateExecution][google.cloud.aiplatform.v1.MetadataService.UpdateExecution]. - execution (:class:`google.cloud.aiplatform_v1.types.Execution`): - Required. The Execution containing updates. The - Execution's - [Execution.name][google.cloud.aiplatform.v1.Execution.name] - field is used to identify the Execution to be updated. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - - This corresponds to the ``execution`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. A FieldMask indicating - which fields should be updated. - Functionality of this field is not yet - supported. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Execution: - Instance of a general execution. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([execution, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.UpdateExecutionRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if execution is not None: - request.execution = execution - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_execution, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("execution.name", request.execution.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_execution(self, - request: Union[metadata_service.DeleteExecutionRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes an Execution. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteExecutionRequest, dict]): - The request object. Request message for - [MetadataService.DeleteExecution][google.cloud.aiplatform.v1.MetadataService.DeleteExecution]. - name (:class:`str`): - Required. The resource name of the Execution to delete. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.DeleteExecutionRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_execution, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
- return response - - async def purge_executions(self, - request: Union[metadata_service.PurgeExecutionsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Purges Executions. - - Args: - request (Union[google.cloud.aiplatform_v1.types.PurgeExecutionsRequest, dict]): - The request object. Request message for - [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1.MetadataService.PurgeExecutions]. - parent (:class:`str`): - Required. The metadata store to purge Executions from. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.PurgeExecutionsResponse` - Response message for - [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1.MetadataService.PurgeExecutions]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.PurgeExecutionsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.purge_executions, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - metadata_service.PurgeExecutionsResponse, - metadata_type=metadata_service.PurgeExecutionsMetadata, - ) - - # Done; return the response. - return response - - async def add_execution_events(self, - request: Union[metadata_service.AddExecutionEventsRequest, dict] = None, - *, - execution: str = None, - events: Sequence[event.Event] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_service.AddExecutionEventsResponse: - r"""Adds Events to the specified Execution. An Event - indicates whether an Artifact was used as an input or - output for an Execution. If an Event already exists - between the Execution and the Artifact, the Event is - skipped. - - Args: - request (Union[google.cloud.aiplatform_v1.types.AddExecutionEventsRequest, dict]): - The request object. 
Request message for - [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents]. - execution (:class:`str`): - Required. The resource name of the Execution that the - Events connect Artifacts with. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - - This corresponds to the ``execution`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - events (:class:`Sequence[google.cloud.aiplatform_v1.types.Event]`): - The Events to create and add. - This corresponds to the ``events`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.AddExecutionEventsResponse: - Response message for - [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([execution, events]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.AddExecutionEventsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if execution is not None: - request.execution = execution - if events: - request.events.extend(events) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.add_execution_events, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("execution", request.execution), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def query_execution_inputs_and_outputs(self, - request: Union[metadata_service.QueryExecutionInputsAndOutputsRequest, dict] = None, - *, - execution: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> lineage_subgraph.LineageSubgraph: - r"""Obtains the set of input and output Artifacts for - this Execution, in the form of LineageSubgraph that also - contains the Execution and connecting Events. - - Args: - request (Union[google.cloud.aiplatform_v1.types.QueryExecutionInputsAndOutputsRequest, dict]): - The request object. Request message for - [MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1.MetadataService.QueryExecutionInputsAndOutputs]. - execution (:class:`str`): - Required. The resource name of the Execution whose input - and output Artifacts should be retrieved as a - LineageSubgraph. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - - This corresponds to the ``execution`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1.types.LineageSubgraph: - A subgraph of the overall lineage - graph. Event edges connect Artifact and - Execution nodes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([execution]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.QueryExecutionInputsAndOutputsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if execution is not None: - request.execution = execution - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.query_execution_inputs_and_outputs, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("execution", request.execution), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def create_metadata_schema(self, - request: Union[metadata_service.CreateMetadataSchemaRequest, dict] = None, - *, - parent: str = None, - metadata_schema: gca_metadata_schema.MetadataSchema = None, - metadata_schema_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_metadata_schema.MetadataSchema: - r"""Creates a MetadataSchema. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateMetadataSchemaRequest, dict]): - The request object. 
Request message for - [MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1.MetadataService.CreateMetadataSchema]. - parent (:class:`str`): - Required. The resource name of the MetadataStore where - the MetadataSchema should be created. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - metadata_schema (:class:`google.cloud.aiplatform_v1.types.MetadataSchema`): - Required. The MetadataSchema to - create. - - This corresponds to the ``metadata_schema`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - metadata_schema_id (:class:`str`): - The {metadata_schema} portion of the resource name with - the format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` - If not provided, the MetadataStore's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are ``/[a-z][0-9]-/``. Must be - unique across all MetadataSchemas in the parent - Location. (Otherwise the request will fail with - ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't - view the preexisting MetadataSchema.) - - This corresponds to the ``metadata_schema_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.MetadataSchema: - Instance of a general MetadataSchema. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, metadata_schema, metadata_schema_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.CreateMetadataSchemaRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if metadata_schema is not None: - request.metadata_schema = metadata_schema - if metadata_schema_id is not None: - request.metadata_schema_id = metadata_schema_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_metadata_schema, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_metadata_schema(self, - request: Union[metadata_service.GetMetadataSchemaRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_schema.MetadataSchema: - r"""Retrieves a specific MetadataSchema. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetMetadataSchemaRequest, dict]): - The request object. Request message for - [MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1.MetadataService.GetMetadataSchema]. - name (:class:`str`): - Required. The resource name of the MetadataSchema to - retrieve. 
Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.MetadataSchema: - Instance of a general MetadataSchema. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.GetMetadataSchemaRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_metadata_schema, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def list_metadata_schemas(self, - request: Union[metadata_service.ListMetadataSchemasRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListMetadataSchemasAsyncPager: - r"""Lists MetadataSchemas. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListMetadataSchemasRequest, dict]): - The request object. Request message for - [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas]. - parent (:class:`str`): - Required. The MetadataStore whose MetadataSchemas should - be listed. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataSchemasAsyncPager: - Response message for - [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.ListMetadataSchemasRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_metadata_schemas, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListMetadataSchemasAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def query_artifact_lineage_subgraph(self, - request: Union[metadata_service.QueryArtifactLineageSubgraphRequest, dict] = None, - *, - artifact: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> lineage_subgraph.LineageSubgraph: - r"""Retrieves lineage of an Artifact represented through - Artifacts and Executions connected by Event edges and - returned as a LineageSubgraph. - - Args: - request (Union[google.cloud.aiplatform_v1.types.QueryArtifactLineageSubgraphRequest, dict]): - The request object. 
Request message for - [MetadataService.QueryArtifactLineageSubgraph][google.cloud.aiplatform.v1.MetadataService.QueryArtifactLineageSubgraph]. - artifact (:class:`str`): - Required. The resource name of the Artifact whose - Lineage needs to be retrieved as a LineageSubgraph. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - - The request may error with FAILED_PRECONDITION if the - number of Artifacts, the number of Executions, or the - number of Events that would be returned for the Context - exceeds 1000. - - This corresponds to the ``artifact`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.LineageSubgraph: - A subgraph of the overall lineage - graph. Event edges connect Artifact and - Execution nodes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([artifact]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.QueryArtifactLineageSubgraphRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if artifact is not None: - request.artifact = artifact - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.query_artifact_lineage_subgraph, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("artifact", request.artifact), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "MetadataServiceAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/client.py deleted file mode 100644 index 23c80f5b1b..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/client.py +++ /dev/null @@ -1,3214 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.metadata_service import pagers -from google.cloud.aiplatform_v1.types import artifact -from google.cloud.aiplatform_v1.types import artifact as gca_artifact -from google.cloud.aiplatform_v1.types import context -from google.cloud.aiplatform_v1.types import context as gca_context -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import event -from google.cloud.aiplatform_v1.types import execution -from google.cloud.aiplatform_v1.types import execution as gca_execution -from google.cloud.aiplatform_v1.types import lineage_subgraph -from google.cloud.aiplatform_v1.types import metadata_schema -from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema -from google.cloud.aiplatform_v1.types import metadata_service -from google.cloud.aiplatform_v1.types import metadata_store -from google.cloud.aiplatform_v1.types import metadata_store as gca_metadata_store -from 
google.cloud.aiplatform_v1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import MetadataServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import MetadataServiceGrpcTransport -from .transports.grpc_asyncio import MetadataServiceGrpcAsyncIOTransport - - -class MetadataServiceClientMeta(type): - """Metaclass for the MetadataService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[MetadataServiceTransport]] - _transport_registry["grpc"] = MetadataServiceGrpcTransport - _transport_registry["grpc_asyncio"] = MetadataServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[MetadataServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class MetadataServiceClient(metaclass=MetadataServiceClientMeta): - """Service for reading and writing metadata entries.""" - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. 
- Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - MetadataServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - MetadataServiceClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> MetadataServiceTransport: - """Returns the transport used by the client instance. - - Returns: - MetadataServiceTransport: The transport used by the client - instance. - """ - return self._transport - - @staticmethod - def artifact_path(project: str,location: str,metadata_store: str,artifact: str,) -> str: - """Returns a fully-qualified artifact string.""" - return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, ) - - @staticmethod - def parse_artifact_path(path: str) -> Dict[str,str]: - """Parses a artifact path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/artifacts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def context_path(project: str,location: str,metadata_store: str,context: str,) -> str: - """Returns a fully-qualified context string.""" - return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, ) - - @staticmethod - def parse_context_path(path: str) -> Dict[str,str]: - """Parses a context path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/contexts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def execution_path(project: str,location: str,metadata_store: str,execution: str,) -> str: - """Returns a fully-qualified execution string.""" - return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, 
location=location, metadata_store=metadata_store, execution=execution, ) - - @staticmethod - def parse_execution_path(path: str) -> Dict[str,str]: - """Parses a execution path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/executions/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def metadata_schema_path(project: str,location: str,metadata_store: str,metadata_schema: str,) -> str: - """Returns a fully-qualified metadata_schema string.""" - return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}".format(project=project, location=location, metadata_store=metadata_store, metadata_schema=metadata_schema, ) - - @staticmethod - def parse_metadata_schema_path(path: str) -> Dict[str,str]: - """Parses a metadata_schema path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/metadataSchemas/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def metadata_store_path(project: str,location: str,metadata_store: str,) -> str: - """Returns a fully-qualified metadata_store string.""" - return "projects/{project}/locations/{location}/metadataStores/{metadata_store}".format(project=project, location=location, metadata_store=metadata_store, ) - - @staticmethod - def parse_metadata_store_path(path: str) -> Dict[str,str]: - """Parses a metadata_store path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component 
segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, MetadataServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, 
- client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the metadata service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, MetadataServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. 
- """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, MetadataServiceTransport): - # transport is a MetadataServiceTransport instance. 
- if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def create_metadata_store(self, - request: Union[metadata_service.CreateMetadataStoreRequest, dict] = None, - *, - parent: str = None, - metadata_store: gca_metadata_store.MetadataStore = None, - metadata_store_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Initializes a MetadataStore, including allocation of - resources. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateMetadataStoreRequest, dict]): - The request object. Request message for - [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1.MetadataService.CreateMetadataStore]. - parent (str): - Required. The resource name of the Location where the - MetadataStore should be created. Format: - ``projects/{project}/locations/{location}/`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - metadata_store (google.cloud.aiplatform_v1.types.MetadataStore): - Required. The MetadataStore to - create. - - This corresponds to the ``metadata_store`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- metadata_store_id (str): - The {metadatastore} portion of the resource name with - the format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - If not provided, the MetadataStore's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are ``/[a-z][0-9]-/``. Must be - unique across all MetadataStores in the parent Location. - (Otherwise the request will fail with ALREADY_EXISTS, or - PERMISSION_DENIED if the caller can't view the - preexisting MetadataStore.) - - This corresponds to the ``metadata_store_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.MetadataStore` Instance of a metadata store. Contains a set of metadata that can be - queried. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, metadata_store, metadata_store_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.CreateMetadataStoreRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, metadata_service.CreateMetadataStoreRequest): - request = metadata_service.CreateMetadataStoreRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if metadata_store is not None: - request.metadata_store = metadata_store - if metadata_store_id is not None: - request.metadata_store_id = metadata_store_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_metadata_store] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_metadata_store.MetadataStore, - metadata_type=metadata_service.CreateMetadataStoreOperationMetadata, - ) - - # Done; return the response. - return response - - def get_metadata_store(self, - request: Union[metadata_service.GetMetadataStoreRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_store.MetadataStore: - r"""Retrieves a specific MetadataStore. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetMetadataStoreRequest, dict]): - The request object. Request message for - [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1.MetadataService.GetMetadataStore]. - name (str): - Required. The resource name of the MetadataStore to - retrieve. 
Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.MetadataStore: - Instance of a metadata store. - Contains a set of metadata that can be - queried. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.GetMetadataStoreRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.GetMetadataStoreRequest): - request = metadata_service.GetMetadataStoreRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_metadata_store] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_metadata_stores(self, - request: Union[metadata_service.ListMetadataStoresRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListMetadataStoresPager: - r"""Lists MetadataStores for a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListMetadataStoresRequest, dict]): - The request object. Request message for - [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1.MetadataService.ListMetadataStores]. - parent (str): - Required. The Location whose MetadataStores should be - listed. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataStoresPager: - Response message for - [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1.MetadataService.ListMetadataStores]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.ListMetadataStoresRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.ListMetadataStoresRequest): - request = metadata_service.ListMetadataStoresRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_metadata_stores] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListMetadataStoresPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_metadata_store(self, - request: Union[metadata_service.DeleteMetadataStoreRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a single MetadataStore and all its child - resources (Artifacts, Executions, and Contexts). 
- - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteMetadataStoreRequest, dict]): - The request object. Request message for - [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1.MetadataService.DeleteMetadataStore]. - name (str): - Required. The resource name of the MetadataStore to - delete. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.DeleteMetadataStoreRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, metadata_service.DeleteMetadataStoreRequest): - request = metadata_service.DeleteMetadataStoreRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_metadata_store] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=metadata_service.DeleteMetadataStoreOperationMetadata, - ) - - # Done; return the response. - return response - - def create_artifact(self, - request: Union[metadata_service.CreateArtifactRequest, dict] = None, - *, - parent: str = None, - artifact: gca_artifact.Artifact = None, - artifact_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_artifact.Artifact: - r"""Creates an Artifact associated with a MetadataStore. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateArtifactRequest, dict]): - The request object. Request message for - [MetadataService.CreateArtifact][google.cloud.aiplatform.v1.MetadataService.CreateArtifact]. - parent (str): - Required. The resource name of the MetadataStore where - the Artifact should be created. 
Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - artifact (google.cloud.aiplatform_v1.types.Artifact): - Required. The Artifact to create. - This corresponds to the ``artifact`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - artifact_id (str): - The {artifact} portion of the resource name with the - format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - If not provided, the Artifact's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are ``/[a-z][0-9]-/``. Must be - unique across all Artifacts in the parent MetadataStore. - (Otherwise the request will fail with ALREADY_EXISTS, or - PERMISSION_DENIED if the caller can't view the - preexisting Artifact.) - - This corresponds to the ``artifact_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Artifact: - Instance of a general artifact. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, artifact, artifact_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.CreateArtifactRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.CreateArtifactRequest): - request = metadata_service.CreateArtifactRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if artifact is not None: - request.artifact = artifact - if artifact_id is not None: - request.artifact_id = artifact_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_artifact] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_artifact(self, - request: Union[metadata_service.GetArtifactRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> artifact.Artifact: - r"""Retrieves a specific Artifact. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetArtifactRequest, dict]): - The request object. Request message for - [MetadataService.GetArtifact][google.cloud.aiplatform.v1.MetadataService.GetArtifact]. - name (str): - Required. The resource name of the Artifact to retrieve. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Artifact: - Instance of a general artifact. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.GetArtifactRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.GetArtifactRequest): - request = metadata_service.GetArtifactRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_artifact] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_artifacts(self, - request: Union[metadata_service.ListArtifactsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListArtifactsPager: - r"""Lists Artifacts in the MetadataStore. 
- - Args: - request (Union[google.cloud.aiplatform_v1.types.ListArtifactsRequest, dict]): - The request object. Request message for - [MetadataService.ListArtifacts][google.cloud.aiplatform.v1.MetadataService.ListArtifacts]. - parent (str): - Required. The MetadataStore whose Artifacts should be - listed. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.metadata_service.pagers.ListArtifactsPager: - Response message for - [MetadataService.ListArtifacts][google.cloud.aiplatform.v1.MetadataService.ListArtifacts]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.ListArtifactsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.ListArtifactsRequest): - request = metadata_service.ListArtifactsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_artifacts] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListArtifactsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_artifact(self, - request: Union[metadata_service.UpdateArtifactRequest, dict] = None, - *, - artifact: gca_artifact.Artifact = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_artifact.Artifact: - r"""Updates a stored Artifact. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateArtifactRequest, dict]): - The request object. Request message for - [MetadataService.UpdateArtifact][google.cloud.aiplatform.v1.MetadataService.UpdateArtifact]. - artifact (google.cloud.aiplatform_v1.types.Artifact): - Required. The Artifact containing updates. The - Artifact's - [Artifact.name][google.cloud.aiplatform.v1.Artifact.name] - field is used to identify the Artifact to be updated. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - - This corresponds to the ``artifact`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. 
A FieldMask indicating - which fields should be updated. - Functionality of this field is not yet - supported. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Artifact: - Instance of a general artifact. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([artifact, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.UpdateArtifactRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.UpdateArtifactRequest): - request = metadata_service.UpdateArtifactRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if artifact is not None: - request.artifact = artifact - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_artifact] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("artifact.name", request.artifact.name), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_artifact(self, - request: Union[metadata_service.DeleteArtifactRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes an Artifact. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteArtifactRequest, dict]): - The request object. Request message for - [MetadataService.DeleteArtifact][google.cloud.aiplatform.v1.MetadataService.DeleteArtifact]. - name (str): - Required. The resource name of the Artifact to delete. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.DeleteArtifactRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.DeleteArtifactRequest): - request = metadata_service.DeleteArtifactRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_artifact] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def purge_artifacts(self, - request: Union[metadata_service.PurgeArtifactsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Purges Artifacts. - - Args: - request (Union[google.cloud.aiplatform_v1.types.PurgeArtifactsRequest, dict]): - The request object. Request message for - [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts]. 
- parent (str): - Required. The metadata store to purge Artifacts from. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.PurgeArtifactsResponse` - Response message for - [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.PurgeArtifactsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.PurgeArtifactsRequest): - request = metadata_service.PurgeArtifactsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.purge_artifacts] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - metadata_service.PurgeArtifactsResponse, - metadata_type=metadata_service.PurgeArtifactsMetadata, - ) - - # Done; return the response. - return response - - def create_context(self, - request: Union[metadata_service.CreateContextRequest, dict] = None, - *, - parent: str = None, - context: gca_context.Context = None, - context_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_context.Context: - r"""Creates a Context associated with a MetadataStore. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateContextRequest, dict]): - The request object. Request message for - [MetadataService.CreateContext][google.cloud.aiplatform.v1.MetadataService.CreateContext]. - parent (str): - Required. The resource name of the MetadataStore where - the Context should be created. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - context (google.cloud.aiplatform_v1.types.Context): - Required. The Context to create. - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- context_id (str): - The {context} portion of the resource name with the - format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}``. - If not provided, the Context's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are ``/[a-z][0-9]-/``. Must be - unique across all Contexts in the parent MetadataStore. - (Otherwise the request will fail with ALREADY_EXISTS, or - PERMISSION_DENIED if the caller can't view the - preexisting Context.) - - This corresponds to the ``context_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Context: - Instance of a general context. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, context, context_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.CreateContextRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.CreateContextRequest): - request = metadata_service.CreateContextRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - if context is not None: - request.context = context - if context_id is not None: - request.context_id = context_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_context] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_context(self, - request: Union[metadata_service.GetContextRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> context.Context: - r"""Retrieves a specific Context. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetContextRequest, dict]): - The request object. Request message for - [MetadataService.GetContext][google.cloud.aiplatform.v1.MetadataService.GetContext]. - name (str): - Required. The resource name of the Context to retrieve. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Context: - Instance of a general context. - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.GetContextRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.GetContextRequest): - request = metadata_service.GetContextRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_context] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_contexts(self, - request: Union[metadata_service.ListContextsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListContextsPager: - r"""Lists Contexts on the MetadataStore. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListContextsRequest, dict]): - The request object. Request message for - [MetadataService.ListContexts][google.cloud.aiplatform.v1.MetadataService.ListContexts] - parent (str): - Required. The MetadataStore whose Contexts should be - listed. 
Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.metadata_service.pagers.ListContextsPager: - Response message for - [MetadataService.ListContexts][google.cloud.aiplatform.v1.MetadataService.ListContexts]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.ListContextsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.ListContextsRequest): - request = metadata_service.ListContextsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_contexts] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListContextsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_context(self, - request: Union[metadata_service.UpdateContextRequest, dict] = None, - *, - context: gca_context.Context = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_context.Context: - r"""Updates a stored Context. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateContextRequest, dict]): - The request object. Request message for - [MetadataService.UpdateContext][google.cloud.aiplatform.v1.MetadataService.UpdateContext]. - context (google.cloud.aiplatform_v1.types.Context): - Required. The Context containing updates. The Context's - [Context.name][google.cloud.aiplatform.v1.Context.name] - field is used to identify the Context to be updated. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A FieldMask indicating - which fields should be updated. - Functionality of this field is not yet - supported. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Context: - Instance of a general context. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([context, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.UpdateContextRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.UpdateContextRequest): - request = metadata_service.UpdateContextRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if context is not None: - request.context = context - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_context] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("context.name", request.context.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def delete_context(self, - request: Union[metadata_service.DeleteContextRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a stored Context. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteContextRequest, dict]): - The request object. Request message for - [MetadataService.DeleteContext][google.cloud.aiplatform.v1.MetadataService.DeleteContext]. - name (str): - Required. The resource name of the Context to delete. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.DeleteContextRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.DeleteContextRequest): - request = metadata_service.DeleteContextRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_context] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def purge_contexts(self, - request: Union[metadata_service.PurgeContextsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Purges Contexts. - - Args: - request (Union[google.cloud.aiplatform_v1.types.PurgeContextsRequest, dict]): - The request object. Request message for - [MetadataService.PurgeContexts][google.cloud.aiplatform.v1.MetadataService.PurgeContexts]. - parent (str): - Required. 
The metadata store to purge Contexts from. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.PurgeContextsResponse` - Response message for - [MetadataService.PurgeContexts][google.cloud.aiplatform.v1.MetadataService.PurgeContexts]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.PurgeContextsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.PurgeContextsRequest): - request = metadata_service.PurgeContextsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.purge_contexts] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - metadata_service.PurgeContextsResponse, - metadata_type=metadata_service.PurgeContextsMetadata, - ) - - # Done; return the response. - return response - - def add_context_artifacts_and_executions(self, - request: Union[metadata_service.AddContextArtifactsAndExecutionsRequest, dict] = None, - *, - context: str = None, - artifacts: Sequence[str] = None, - executions: Sequence[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_service.AddContextArtifactsAndExecutionsResponse: - r"""Adds a set of Artifacts and Executions to a Context. - If any of the Artifacts or Executions have already been - added to a Context, they are simply skipped. - - Args: - request (Union[google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsRequest, dict]): - The request object. Request message for - [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions]. - context (str): - Required. The resource name of the Context that the - Artifacts and Executions belong to. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - artifacts (Sequence[str]): - The resource names of the Artifacts to attribute to the - Context. 
- - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - - This corresponds to the ``artifacts`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - executions (Sequence[str]): - The resource names of the Executions to associate with - the Context. - - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - - This corresponds to the ``executions`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsResponse: - Response message for - [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([context, artifacts, executions]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.AddContextArtifactsAndExecutionsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.AddContextArtifactsAndExecutionsRequest): - request = metadata_service.AddContextArtifactsAndExecutionsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if context is not None: - request.context = context - if artifacts is not None: - request.artifacts = artifacts - if executions is not None: - request.executions = executions - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.add_context_artifacts_and_executions] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("context", request.context), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def add_context_children(self, - request: Union[metadata_service.AddContextChildrenRequest, dict] = None, - *, - context: str = None, - child_contexts: Sequence[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_service.AddContextChildrenResponse: - r"""Adds a set of Contexts as children to a parent Context. If any - of the child Contexts have already been added to the parent - Context, they are simply skipped. If this call would create a - cycle or cause any Context to have more than 10 parents, the - request will fail with an INVALID_ARGUMENT error. - - Args: - request (Union[google.cloud.aiplatform_v1.types.AddContextChildrenRequest, dict]): - The request object. Request message for - [MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren]. - context (str): - Required. The resource name of the parent Context. - - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- child_contexts (Sequence[str]): - The resource names of the child - Contexts. - - This corresponds to the ``child_contexts`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.AddContextChildrenResponse: - Response message for - [MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([context, child_contexts]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.AddContextChildrenRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.AddContextChildrenRequest): - request = metadata_service.AddContextChildrenRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if context is not None: - request.context = context - if child_contexts is not None: - request.child_contexts = child_contexts - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.add_context_children] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("context", request.context), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def query_context_lineage_subgraph(self, - request: Union[metadata_service.QueryContextLineageSubgraphRequest, dict] = None, - *, - context: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> lineage_subgraph.LineageSubgraph: - r"""Retrieves Artifacts and Executions within the - specified Context, connected by Event edges and returned - as a LineageSubgraph. - - Args: - request (Union[google.cloud.aiplatform_v1.types.QueryContextLineageSubgraphRequest, dict]): - The request object. Request message for - [MetadataService.QueryContextLineageSubgraph][google.cloud.aiplatform.v1.MetadataService.QueryContextLineageSubgraph]. - context (str): - Required. The resource name of the Context whose - Artifacts and Executions should be retrieved as a - LineageSubgraph. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - - The request may error with FAILED_PRECONDITION if the - number of Artifacts, the number of Executions, or the - number of Events that would be returned for the Context - exceeds 1000. - - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.LineageSubgraph: - A subgraph of the overall lineage - graph. Event edges connect Artifact and - Execution nodes. 
- - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([context]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.QueryContextLineageSubgraphRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.QueryContextLineageSubgraphRequest): - request = metadata_service.QueryContextLineageSubgraphRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if context is not None: - request.context = context - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.query_context_lineage_subgraph] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("context", request.context), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def create_execution(self, - request: Union[metadata_service.CreateExecutionRequest, dict] = None, - *, - parent: str = None, - execution: gca_execution.Execution = None, - execution_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_execution.Execution: - r"""Creates an Execution associated with a MetadataStore. 
- - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateExecutionRequest, dict]): - The request object. Request message for - [MetadataService.CreateExecution][google.cloud.aiplatform.v1.MetadataService.CreateExecution]. - parent (str): - Required. The resource name of the MetadataStore where - the Execution should be created. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - execution (google.cloud.aiplatform_v1.types.Execution): - Required. The Execution to create. - This corresponds to the ``execution`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - execution_id (str): - The {execution} portion of the resource name with the - format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - If not provided, the Execution's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are ``/[a-z][0-9]-/``. Must be - unique across all Executions in the parent - MetadataStore. (Otherwise the request will fail with - ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't - view the preexisting Execution.) - - This corresponds to the ``execution_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Execution: - Instance of a general execution. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, execution, execution_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.CreateExecutionRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.CreateExecutionRequest): - request = metadata_service.CreateExecutionRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if execution is not None: - request.execution = execution - if execution_id is not None: - request.execution_id = execution_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_execution] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_execution(self, - request: Union[metadata_service.GetExecutionRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> execution.Execution: - r"""Retrieves a specific Execution. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetExecutionRequest, dict]): - The request object. Request message for - [MetadataService.GetExecution][google.cloud.aiplatform.v1.MetadataService.GetExecution]. - name (str): - Required. 
The resource name of the Execution to - retrieve. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Execution: - Instance of a general execution. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.GetExecutionRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.GetExecutionRequest): - request = metadata_service.GetExecutionRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_execution] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_executions(self, - request: Union[metadata_service.ListExecutionsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListExecutionsPager: - r"""Lists Executions in the MetadataStore. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListExecutionsRequest, dict]): - The request object. Request message for - [MetadataService.ListExecutions][google.cloud.aiplatform.v1.MetadataService.ListExecutions]. - parent (str): - Required. The MetadataStore whose Executions should be - listed. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.metadata_service.pagers.ListExecutionsPager: - Response message for - [MetadataService.ListExecutions][google.cloud.aiplatform.v1.MetadataService.ListExecutions]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.ListExecutionsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.ListExecutionsRequest): - request = metadata_service.ListExecutionsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_executions] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListExecutionsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_execution(self, - request: Union[metadata_service.UpdateExecutionRequest, dict] = None, - *, - execution: gca_execution.Execution = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_execution.Execution: - r"""Updates a stored Execution. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateExecutionRequest, dict]): - The request object. 
Request message for - [MetadataService.UpdateExecution][google.cloud.aiplatform.v1.MetadataService.UpdateExecution]. - execution (google.cloud.aiplatform_v1.types.Execution): - Required. The Execution containing updates. The - Execution's - [Execution.name][google.cloud.aiplatform.v1.Execution.name] - field is used to identify the Execution to be updated. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - - This corresponds to the ``execution`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A FieldMask indicating - which fields should be updated. - Functionality of this field is not yet - supported. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Execution: - Instance of a general execution. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([execution, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.UpdateExecutionRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, metadata_service.UpdateExecutionRequest): - request = metadata_service.UpdateExecutionRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if execution is not None: - request.execution = execution - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_execution] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("execution.name", request.execution.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_execution(self, - request: Union[metadata_service.DeleteExecutionRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes an Execution. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteExecutionRequest, dict]): - The request object. Request message for - [MetadataService.DeleteExecution][google.cloud.aiplatform.v1.MetadataService.DeleteExecution]. - name (str): - Required. The resource name of the Execution to delete. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.DeleteExecutionRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.DeleteExecutionRequest): - request = metadata_service.DeleteExecutionRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_execution] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def purge_executions(self, - request: Union[metadata_service.PurgeExecutionsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Purges Executions. - - Args: - request (Union[google.cloud.aiplatform_v1.types.PurgeExecutionsRequest, dict]): - The request object. Request message for - [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1.MetadataService.PurgeExecutions]. - parent (str): - Required. The metadata store to purge Executions from. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.PurgeExecutionsResponse` - Response message for - [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1.MetadataService.PurgeExecutions]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.PurgeExecutionsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.PurgeExecutionsRequest): - request = metadata_service.PurgeExecutionsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.purge_executions] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - metadata_service.PurgeExecutionsResponse, - metadata_type=metadata_service.PurgeExecutionsMetadata, - ) - - # Done; return the response. - return response - - def add_execution_events(self, - request: Union[metadata_service.AddExecutionEventsRequest, dict] = None, - *, - execution: str = None, - events: Sequence[event.Event] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_service.AddExecutionEventsResponse: - r"""Adds Events to the specified Execution. An Event - indicates whether an Artifact was used as an input or - output for an Execution. 
If an Event already exists - between the Execution and the Artifact, the Event is - skipped. - - Args: - request (Union[google.cloud.aiplatform_v1.types.AddExecutionEventsRequest, dict]): - The request object. Request message for - [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents]. - execution (str): - Required. The resource name of the Execution that the - Events connect Artifacts with. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - - This corresponds to the ``execution`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - events (Sequence[google.cloud.aiplatform_v1.types.Event]): - The Events to create and add. - This corresponds to the ``events`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.AddExecutionEventsResponse: - Response message for - [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([execution, events]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.AddExecutionEventsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, metadata_service.AddExecutionEventsRequest): - request = metadata_service.AddExecutionEventsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if execution is not None: - request.execution = execution - if events is not None: - request.events = events - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.add_execution_events] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("execution", request.execution), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def query_execution_inputs_and_outputs(self, - request: Union[metadata_service.QueryExecutionInputsAndOutputsRequest, dict] = None, - *, - execution: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> lineage_subgraph.LineageSubgraph: - r"""Obtains the set of input and output Artifacts for - this Execution, in the form of LineageSubgraph that also - contains the Execution and connecting Events. - - Args: - request (Union[google.cloud.aiplatform_v1.types.QueryExecutionInputsAndOutputsRequest, dict]): - The request object. Request message for - [MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1.MetadataService.QueryExecutionInputsAndOutputs]. - execution (str): - Required. The resource name of the Execution whose input - and output Artifacts should be retrieved as a - LineageSubgraph. 
Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - - This corresponds to the ``execution`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.LineageSubgraph: - A subgraph of the overall lineage - graph. Event edges connect Artifact and - Execution nodes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([execution]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.QueryExecutionInputsAndOutputsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.QueryExecutionInputsAndOutputsRequest): - request = metadata_service.QueryExecutionInputsAndOutputsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if execution is not None: - request.execution = execution - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.query_execution_inputs_and_outputs] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("execution", request.execution), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def create_metadata_schema(self, - request: Union[metadata_service.CreateMetadataSchemaRequest, dict] = None, - *, - parent: str = None, - metadata_schema: gca_metadata_schema.MetadataSchema = None, - metadata_schema_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_metadata_schema.MetadataSchema: - r"""Creates a MetadataSchema. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateMetadataSchemaRequest, dict]): - The request object. Request message for - [MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1.MetadataService.CreateMetadataSchema]. - parent (str): - Required. The resource name of the MetadataStore where - the MetadataSchema should be created. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - metadata_schema (google.cloud.aiplatform_v1.types.MetadataSchema): - Required. The MetadataSchema to - create. - - This corresponds to the ``metadata_schema`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - metadata_schema_id (str): - The {metadata_schema} portion of the resource name with - the format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` - If not provided, the MetadataStore's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are ``/[a-z][0-9]-/``. Must be - unique across all MetadataSchemas in the parent - Location. 
(Otherwise the request will fail with - ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't - view the preexisting MetadataSchema.) - - This corresponds to the ``metadata_schema_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.MetadataSchema: - Instance of a general MetadataSchema. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, metadata_schema, metadata_schema_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.CreateMetadataSchemaRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.CreateMetadataSchemaRequest): - request = metadata_service.CreateMetadataSchemaRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if metadata_schema is not None: - request.metadata_schema = metadata_schema - if metadata_schema_id is not None: - request.metadata_schema_id = metadata_schema_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.create_metadata_schema] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_metadata_schema(self, - request: Union[metadata_service.GetMetadataSchemaRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_schema.MetadataSchema: - r"""Retrieves a specific MetadataSchema. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetMetadataSchemaRequest, dict]): - The request object. Request message for - [MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1.MetadataService.GetMetadataSchema]. - name (str): - Required. The resource name of the MetadataSchema to - retrieve. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.MetadataSchema: - Instance of a general MetadataSchema. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.GetMetadataSchemaRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.GetMetadataSchemaRequest): - request = metadata_service.GetMetadataSchemaRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_metadata_schema] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_metadata_schemas(self, - request: Union[metadata_service.ListMetadataSchemasRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListMetadataSchemasPager: - r"""Lists MetadataSchemas. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListMetadataSchemasRequest, dict]): - The request object. Request message for - [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas]. - parent (str): - Required. The MetadataStore whose MetadataSchemas should - be listed. 
Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataSchemasPager: - Response message for - [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.ListMetadataSchemasRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.ListMetadataSchemasRequest): - request = metadata_service.ListMetadataSchemasRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_metadata_schemas] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListMetadataSchemasPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def query_artifact_lineage_subgraph(self, - request: Union[metadata_service.QueryArtifactLineageSubgraphRequest, dict] = None, - *, - artifact: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> lineage_subgraph.LineageSubgraph: - r"""Retrieves lineage of an Artifact represented through - Artifacts and Executions connected by Event edges and - returned as a LineageSubgraph. - - Args: - request (Union[google.cloud.aiplatform_v1.types.QueryArtifactLineageSubgraphRequest, dict]): - The request object. Request message for - [MetadataService.QueryArtifactLineageSubgraph][google.cloud.aiplatform.v1.MetadataService.QueryArtifactLineageSubgraph]. - artifact (str): - Required. The resource name of the Artifact whose - Lineage needs to be retrieved as a LineageSubgraph. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - - The request may error with FAILED_PRECONDITION if the - number of Artifacts, the number of Executions, or the - number of Events that would be returned for the Context - exceeds 1000. - - This corresponds to the ``artifact`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.LineageSubgraph: - A subgraph of the overall lineage - graph. Event edges connect Artifact and - Execution nodes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([artifact]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.QueryArtifactLineageSubgraphRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.QueryArtifactLineageSubgraphRequest): - request = metadata_service.QueryArtifactLineageSubgraphRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if artifact is not None: - request.artifact = artifact - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.query_artifact_lineage_subgraph] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("artifact", request.artifact), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. 
warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! - """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "MetadataServiceClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/pagers.py deleted file mode 100644 index 15301ceb29..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/pagers.py +++ /dev/null @@ -1,633 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.aiplatform_v1.types import artifact -from google.cloud.aiplatform_v1.types import context -from google.cloud.aiplatform_v1.types import execution -from google.cloud.aiplatform_v1.types import metadata_schema -from google.cloud.aiplatform_v1.types import metadata_service -from google.cloud.aiplatform_v1.types import metadata_store - - -class ListMetadataStoresPager: - """A pager for iterating through ``list_metadata_stores`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListMetadataStoresResponse` object, and - provides an ``__iter__`` method to iterate through its - ``metadata_stores`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListMetadataStores`` requests and continue to iterate - through the ``metadata_stores`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListMetadataStoresResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., metadata_service.ListMetadataStoresResponse], - request: metadata_service.ListMetadataStoresRequest, - response: metadata_service.ListMetadataStoresResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListMetadataStoresRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListMetadataStoresResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = metadata_service.ListMetadataStoresRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[metadata_service.ListMetadataStoresResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[metadata_store.MetadataStore]: - for page in self.pages: - yield from page.metadata_stores - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListMetadataStoresAsyncPager: - """A pager for iterating through ``list_metadata_stores`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListMetadataStoresResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``metadata_stores`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListMetadataStores`` requests and continue to iterate - through the ``metadata_stores`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListMetadataStoresResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[metadata_service.ListMetadataStoresResponse]], - request: metadata_service.ListMetadataStoresRequest, - response: metadata_service.ListMetadataStoresResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. 
- request (google.cloud.aiplatform_v1.types.ListMetadataStoresRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListMetadataStoresResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = metadata_service.ListMetadataStoresRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[metadata_service.ListMetadataStoresResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[metadata_store.MetadataStore]: - async def async_generator(): - async for page in self.pages: - for response in page.metadata_stores: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListArtifactsPager: - """A pager for iterating through ``list_artifacts`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListArtifactsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``artifacts`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListArtifacts`` requests and continue to iterate - through the ``artifacts`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListArtifactsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., metadata_service.ListArtifactsResponse], - request: metadata_service.ListArtifactsRequest, - response: metadata_service.ListArtifactsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListArtifactsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListArtifactsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = metadata_service.ListArtifactsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[metadata_service.ListArtifactsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[artifact.Artifact]: - for page in self.pages: - yield from page.artifacts - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListArtifactsAsyncPager: - """A pager for iterating through ``list_artifacts`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListArtifactsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``artifacts`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListArtifacts`` requests and continue to iterate - through the ``artifacts`` field on the - corresponding responses. 
- - All the usual :class:`google.cloud.aiplatform_v1.types.ListArtifactsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[metadata_service.ListArtifactsResponse]], - request: metadata_service.ListArtifactsRequest, - response: metadata_service.ListArtifactsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListArtifactsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListArtifactsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = metadata_service.ListArtifactsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[metadata_service.ListArtifactsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[artifact.Artifact]: - async def async_generator(): - async for page in self.pages: - for response in page.artifacts: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListContextsPager: - """A pager for iterating through ``list_contexts`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListContextsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``contexts`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListContexts`` requests and continue to iterate - through the ``contexts`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListContextsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., metadata_service.ListContextsResponse], - request: metadata_service.ListContextsRequest, - response: metadata_service.ListContextsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListContextsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListContextsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = metadata_service.ListContextsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[metadata_service.ListContextsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[context.Context]: - for page in self.pages: - yield from page.contexts - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListContextsAsyncPager: - """A pager for iterating through ``list_contexts`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListContextsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``contexts`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListContexts`` requests and continue to iterate - through the ``contexts`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListContextsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[metadata_service.ListContextsResponse]], - request: metadata_service.ListContextsRequest, - response: metadata_service.ListContextsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListContextsRequest): - The initial request object. 
- response (google.cloud.aiplatform_v1.types.ListContextsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = metadata_service.ListContextsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[metadata_service.ListContextsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[context.Context]: - async def async_generator(): - async for page in self.pages: - for response in page.contexts: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListExecutionsPager: - """A pager for iterating through ``list_executions`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListExecutionsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``executions`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListExecutions`` requests and continue to iterate - through the ``executions`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListExecutionsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., metadata_service.ListExecutionsResponse], - request: metadata_service.ListExecutionsRequest, - response: metadata_service.ListExecutionsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListExecutionsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListExecutionsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = metadata_service.ListExecutionsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[metadata_service.ListExecutionsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[execution.Execution]: - for page in self.pages: - yield from page.executions - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListExecutionsAsyncPager: - """A pager for iterating through ``list_executions`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListExecutionsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``executions`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListExecutions`` requests and continue to iterate - through the ``executions`` field on the - corresponding responses. 
- - All the usual :class:`google.cloud.aiplatform_v1.types.ListExecutionsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[metadata_service.ListExecutionsResponse]], - request: metadata_service.ListExecutionsRequest, - response: metadata_service.ListExecutionsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListExecutionsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListExecutionsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = metadata_service.ListExecutionsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[metadata_service.ListExecutionsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[execution.Execution]: - async def async_generator(): - async for page in self.pages: - for response in page.executions: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListMetadataSchemasPager: - """A pager for iterating through ``list_metadata_schemas`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListMetadataSchemasResponse` object, and - provides an ``__iter__`` method to iterate through its - ``metadata_schemas`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListMetadataSchemas`` requests and continue to iterate - through the ``metadata_schemas`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListMetadataSchemasResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., metadata_service.ListMetadataSchemasResponse], - request: metadata_service.ListMetadataSchemasRequest, - response: metadata_service.ListMetadataSchemasResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListMetadataSchemasRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListMetadataSchemasResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = metadata_service.ListMetadataSchemasRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[metadata_service.ListMetadataSchemasResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[metadata_schema.MetadataSchema]: - for page in self.pages: - yield from page.metadata_schemas - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListMetadataSchemasAsyncPager: - """A pager for iterating through ``list_metadata_schemas`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListMetadataSchemasResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``metadata_schemas`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListMetadataSchemas`` requests and continue to iterate - through the ``metadata_schemas`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListMetadataSchemasResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[metadata_service.ListMetadataSchemasResponse]], - request: metadata_service.ListMetadataSchemasRequest, - response: metadata_service.ListMetadataSchemasResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. 
- request (google.cloud.aiplatform_v1.types.ListMetadataSchemasRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListMetadataSchemasResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = metadata_service.ListMetadataSchemasRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[metadata_service.ListMetadataSchemasResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[metadata_schema.MetadataSchema]: - async def async_generator(): - async for page in self.pages: - for response in page.metadata_schemas: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/transports/__init__.py deleted file mode 100644 index 688ce8218c..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import MetadataServiceTransport -from .grpc import MetadataServiceGrpcTransport -from .grpc_asyncio import MetadataServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[MetadataServiceTransport]] -_transport_registry['grpc'] = MetadataServiceGrpcTransport -_transport_registry['grpc_asyncio'] = MetadataServiceGrpcAsyncIOTransport - -__all__ = ( - 'MetadataServiceTransport', - 'MetadataServiceGrpcTransport', - 'MetadataServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py deleted file mode 100644 index 59adfffe25..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/transports/base.py +++ /dev/null @@ -1,583 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1.types import artifact -from google.cloud.aiplatform_v1.types import artifact as gca_artifact -from google.cloud.aiplatform_v1.types import context -from google.cloud.aiplatform_v1.types import context as gca_context -from google.cloud.aiplatform_v1.types import execution -from google.cloud.aiplatform_v1.types import execution as gca_execution -from google.cloud.aiplatform_v1.types import lineage_subgraph -from google.cloud.aiplatform_v1.types import metadata_schema -from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema -from google.cloud.aiplatform_v1.types import metadata_service -from google.cloud.aiplatform_v1.types import metadata_store -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class MetadataServiceTransport(abc.ABC): - """Abstract transport class for MetadataService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = 
None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. 
- if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.create_metadata_store: gapic_v1.method.wrap_method( - self.create_metadata_store, - default_timeout=None, - client_info=client_info, - ), - self.get_metadata_store: gapic_v1.method.wrap_method( - self.get_metadata_store, - default_timeout=None, - client_info=client_info, - ), - self.list_metadata_stores: gapic_v1.method.wrap_method( - self.list_metadata_stores, - default_timeout=None, - client_info=client_info, - ), - self.delete_metadata_store: gapic_v1.method.wrap_method( - self.delete_metadata_store, - default_timeout=None, - client_info=client_info, - ), - self.create_artifact: gapic_v1.method.wrap_method( - self.create_artifact, - default_timeout=None, - client_info=client_info, - ), - self.get_artifact: gapic_v1.method.wrap_method( - self.get_artifact, - default_timeout=None, - client_info=client_info, - ), - self.list_artifacts: gapic_v1.method.wrap_method( - self.list_artifacts, - default_timeout=None, - client_info=client_info, - ), - self.update_artifact: gapic_v1.method.wrap_method( - self.update_artifact, - 
default_timeout=None, - client_info=client_info, - ), - self.delete_artifact: gapic_v1.method.wrap_method( - self.delete_artifact, - default_timeout=None, - client_info=client_info, - ), - self.purge_artifacts: gapic_v1.method.wrap_method( - self.purge_artifacts, - default_timeout=None, - client_info=client_info, - ), - self.create_context: gapic_v1.method.wrap_method( - self.create_context, - default_timeout=None, - client_info=client_info, - ), - self.get_context: gapic_v1.method.wrap_method( - self.get_context, - default_timeout=None, - client_info=client_info, - ), - self.list_contexts: gapic_v1.method.wrap_method( - self.list_contexts, - default_timeout=None, - client_info=client_info, - ), - self.update_context: gapic_v1.method.wrap_method( - self.update_context, - default_timeout=None, - client_info=client_info, - ), - self.delete_context: gapic_v1.method.wrap_method( - self.delete_context, - default_timeout=None, - client_info=client_info, - ), - self.purge_contexts: gapic_v1.method.wrap_method( - self.purge_contexts, - default_timeout=None, - client_info=client_info, - ), - self.add_context_artifacts_and_executions: gapic_v1.method.wrap_method( - self.add_context_artifacts_and_executions, - default_timeout=None, - client_info=client_info, - ), - self.add_context_children: gapic_v1.method.wrap_method( - self.add_context_children, - default_timeout=None, - client_info=client_info, - ), - self.query_context_lineage_subgraph: gapic_v1.method.wrap_method( - self.query_context_lineage_subgraph, - default_timeout=None, - client_info=client_info, - ), - self.create_execution: gapic_v1.method.wrap_method( - self.create_execution, - default_timeout=None, - client_info=client_info, - ), - self.get_execution: gapic_v1.method.wrap_method( - self.get_execution, - default_timeout=None, - client_info=client_info, - ), - self.list_executions: gapic_v1.method.wrap_method( - self.list_executions, - default_timeout=None, - client_info=client_info, - ), - 
self.update_execution: gapic_v1.method.wrap_method( - self.update_execution, - default_timeout=None, - client_info=client_info, - ), - self.delete_execution: gapic_v1.method.wrap_method( - self.delete_execution, - default_timeout=None, - client_info=client_info, - ), - self.purge_executions: gapic_v1.method.wrap_method( - self.purge_executions, - default_timeout=None, - client_info=client_info, - ), - self.add_execution_events: gapic_v1.method.wrap_method( - self.add_execution_events, - default_timeout=None, - client_info=client_info, - ), - self.query_execution_inputs_and_outputs: gapic_v1.method.wrap_method( - self.query_execution_inputs_and_outputs, - default_timeout=None, - client_info=client_info, - ), - self.create_metadata_schema: gapic_v1.method.wrap_method( - self.create_metadata_schema, - default_timeout=None, - client_info=client_info, - ), - self.get_metadata_schema: gapic_v1.method.wrap_method( - self.get_metadata_schema, - default_timeout=None, - client_info=client_info, - ), - self.list_metadata_schemas: gapic_v1.method.wrap_method( - self.list_metadata_schemas, - default_timeout=None, - client_info=client_info, - ), - self.query_artifact_lineage_subgraph: gapic_v1.method.wrap_method( - self.query_artifact_lineage_subgraph, - default_timeout=None, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! 
- """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_metadata_store(self) -> Callable[ - [metadata_service.CreateMetadataStoreRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_metadata_store(self) -> Callable[ - [metadata_service.GetMetadataStoreRequest], - Union[ - metadata_store.MetadataStore, - Awaitable[metadata_store.MetadataStore] - ]]: - raise NotImplementedError() - - @property - def list_metadata_stores(self) -> Callable[ - [metadata_service.ListMetadataStoresRequest], - Union[ - metadata_service.ListMetadataStoresResponse, - Awaitable[metadata_service.ListMetadataStoresResponse] - ]]: - raise NotImplementedError() - - @property - def delete_metadata_store(self) -> Callable[ - [metadata_service.DeleteMetadataStoreRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def create_artifact(self) -> Callable[ - [metadata_service.CreateArtifactRequest], - Union[ - gca_artifact.Artifact, - Awaitable[gca_artifact.Artifact] - ]]: - raise NotImplementedError() - - @property - def get_artifact(self) -> Callable[ - [metadata_service.GetArtifactRequest], - Union[ - artifact.Artifact, - Awaitable[artifact.Artifact] - ]]: - raise NotImplementedError() - - @property - def list_artifacts(self) -> Callable[ - [metadata_service.ListArtifactsRequest], - Union[ - metadata_service.ListArtifactsResponse, - Awaitable[metadata_service.ListArtifactsResponse] - ]]: - raise NotImplementedError() - - @property - def update_artifact(self) -> Callable[ - [metadata_service.UpdateArtifactRequest], - Union[ - gca_artifact.Artifact, - Awaitable[gca_artifact.Artifact] - ]]: - raise NotImplementedError() - - @property - def delete_artifact(self) -> Callable[ - 
[metadata_service.DeleteArtifactRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def purge_artifacts(self) -> Callable[ - [metadata_service.PurgeArtifactsRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def create_context(self) -> Callable[ - [metadata_service.CreateContextRequest], - Union[ - gca_context.Context, - Awaitable[gca_context.Context] - ]]: - raise NotImplementedError() - - @property - def get_context(self) -> Callable[ - [metadata_service.GetContextRequest], - Union[ - context.Context, - Awaitable[context.Context] - ]]: - raise NotImplementedError() - - @property - def list_contexts(self) -> Callable[ - [metadata_service.ListContextsRequest], - Union[ - metadata_service.ListContextsResponse, - Awaitable[metadata_service.ListContextsResponse] - ]]: - raise NotImplementedError() - - @property - def update_context(self) -> Callable[ - [metadata_service.UpdateContextRequest], - Union[ - gca_context.Context, - Awaitable[gca_context.Context] - ]]: - raise NotImplementedError() - - @property - def delete_context(self) -> Callable[ - [metadata_service.DeleteContextRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def purge_contexts(self) -> Callable[ - [metadata_service.PurgeContextsRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def add_context_artifacts_and_executions(self) -> Callable[ - [metadata_service.AddContextArtifactsAndExecutionsRequest], - Union[ - metadata_service.AddContextArtifactsAndExecutionsResponse, - Awaitable[metadata_service.AddContextArtifactsAndExecutionsResponse] - ]]: - raise NotImplementedError() - - @property - def add_context_children(self) -> Callable[ - 
[metadata_service.AddContextChildrenRequest], - Union[ - metadata_service.AddContextChildrenResponse, - Awaitable[metadata_service.AddContextChildrenResponse] - ]]: - raise NotImplementedError() - - @property - def query_context_lineage_subgraph(self) -> Callable[ - [metadata_service.QueryContextLineageSubgraphRequest], - Union[ - lineage_subgraph.LineageSubgraph, - Awaitable[lineage_subgraph.LineageSubgraph] - ]]: - raise NotImplementedError() - - @property - def create_execution(self) -> Callable[ - [metadata_service.CreateExecutionRequest], - Union[ - gca_execution.Execution, - Awaitable[gca_execution.Execution] - ]]: - raise NotImplementedError() - - @property - def get_execution(self) -> Callable[ - [metadata_service.GetExecutionRequest], - Union[ - execution.Execution, - Awaitable[execution.Execution] - ]]: - raise NotImplementedError() - - @property - def list_executions(self) -> Callable[ - [metadata_service.ListExecutionsRequest], - Union[ - metadata_service.ListExecutionsResponse, - Awaitable[metadata_service.ListExecutionsResponse] - ]]: - raise NotImplementedError() - - @property - def update_execution(self) -> Callable[ - [metadata_service.UpdateExecutionRequest], - Union[ - gca_execution.Execution, - Awaitable[gca_execution.Execution] - ]]: - raise NotImplementedError() - - @property - def delete_execution(self) -> Callable[ - [metadata_service.DeleteExecutionRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def purge_executions(self) -> Callable[ - [metadata_service.PurgeExecutionsRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def add_execution_events(self) -> Callable[ - [metadata_service.AddExecutionEventsRequest], - Union[ - metadata_service.AddExecutionEventsResponse, - Awaitable[metadata_service.AddExecutionEventsResponse] - ]]: - raise NotImplementedError() - - @property 
- def query_execution_inputs_and_outputs(self) -> Callable[ - [metadata_service.QueryExecutionInputsAndOutputsRequest], - Union[ - lineage_subgraph.LineageSubgraph, - Awaitable[lineage_subgraph.LineageSubgraph] - ]]: - raise NotImplementedError() - - @property - def create_metadata_schema(self) -> Callable[ - [metadata_service.CreateMetadataSchemaRequest], - Union[ - gca_metadata_schema.MetadataSchema, - Awaitable[gca_metadata_schema.MetadataSchema] - ]]: - raise NotImplementedError() - - @property - def get_metadata_schema(self) -> Callable[ - [metadata_service.GetMetadataSchemaRequest], - Union[ - metadata_schema.MetadataSchema, - Awaitable[metadata_schema.MetadataSchema] - ]]: - raise NotImplementedError() - - @property - def list_metadata_schemas(self) -> Callable[ - [metadata_service.ListMetadataSchemasRequest], - Union[ - metadata_service.ListMetadataSchemasResponse, - Awaitable[metadata_service.ListMetadataSchemasResponse] - ]]: - raise NotImplementedError() - - @property - def query_artifact_lineage_subgraph(self) -> Callable[ - [metadata_service.QueryArtifactLineageSubgraphRequest], - Union[ - lineage_subgraph.LineageSubgraph, - Awaitable[lineage_subgraph.LineageSubgraph] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'MetadataServiceTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py deleted file mode 100644 index e8f5f7a48b..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc.py +++ /dev/null @@ -1,1084 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1.types import artifact -from google.cloud.aiplatform_v1.types import artifact as gca_artifact -from google.cloud.aiplatform_v1.types import context -from google.cloud.aiplatform_v1.types import context as gca_context -from google.cloud.aiplatform_v1.types import execution -from google.cloud.aiplatform_v1.types import execution as gca_execution -from google.cloud.aiplatform_v1.types import lineage_subgraph -from google.cloud.aiplatform_v1.types import metadata_schema -from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema -from google.cloud.aiplatform_v1.types import metadata_service -from google.cloud.aiplatform_v1.types import metadata_store -from google.longrunning import operations_pb2 # type: ignore -from .base import MetadataServiceTransport, DEFAULT_CLIENT_INFO - - -class MetadataServiceGrpcTransport(MetadataServiceTransport): - """gRPC backend transport for MetadataService. - - Service for reading and writing metadata entries. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. 
- - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. 
A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. 
- self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. 
- return self._operations_client - - @property - def create_metadata_store(self) -> Callable[ - [metadata_service.CreateMetadataStoreRequest], - operations_pb2.Operation]: - r"""Return a callable for the create metadata store method over gRPC. - - Initializes a MetadataStore, including allocation of - resources. - - Returns: - Callable[[~.CreateMetadataStoreRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_metadata_store' not in self._stubs: - self._stubs['create_metadata_store'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/CreateMetadataStore', - request_serializer=metadata_service.CreateMetadataStoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_metadata_store'] - - @property - def get_metadata_store(self) -> Callable[ - [metadata_service.GetMetadataStoreRequest], - metadata_store.MetadataStore]: - r"""Return a callable for the get metadata store method over gRPC. - - Retrieves a specific MetadataStore. - - Returns: - Callable[[~.GetMetadataStoreRequest], - ~.MetadataStore]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_metadata_store' not in self._stubs: - self._stubs['get_metadata_store'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/GetMetadataStore', - request_serializer=metadata_service.GetMetadataStoreRequest.serialize, - response_deserializer=metadata_store.MetadataStore.deserialize, - ) - return self._stubs['get_metadata_store'] - - @property - def list_metadata_stores(self) -> Callable[ - [metadata_service.ListMetadataStoresRequest], - metadata_service.ListMetadataStoresResponse]: - r"""Return a callable for the list metadata stores method over gRPC. - - Lists MetadataStores for a Location. - - Returns: - Callable[[~.ListMetadataStoresRequest], - ~.ListMetadataStoresResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_metadata_stores' not in self._stubs: - self._stubs['list_metadata_stores'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/ListMetadataStores', - request_serializer=metadata_service.ListMetadataStoresRequest.serialize, - response_deserializer=metadata_service.ListMetadataStoresResponse.deserialize, - ) - return self._stubs['list_metadata_stores'] - - @property - def delete_metadata_store(self) -> Callable[ - [metadata_service.DeleteMetadataStoreRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete metadata store method over gRPC. - - Deletes a single MetadataStore and all its child - resources (Artifacts, Executions, and Contexts). - - Returns: - Callable[[~.DeleteMetadataStoreRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_metadata_store' not in self._stubs: - self._stubs['delete_metadata_store'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/DeleteMetadataStore', - request_serializer=metadata_service.DeleteMetadataStoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_metadata_store'] - - @property - def create_artifact(self) -> Callable[ - [metadata_service.CreateArtifactRequest], - gca_artifact.Artifact]: - r"""Return a callable for the create artifact method over gRPC. - - Creates an Artifact associated with a MetadataStore. - - Returns: - Callable[[~.CreateArtifactRequest], - ~.Artifact]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_artifact' not in self._stubs: - self._stubs['create_artifact'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/CreateArtifact', - request_serializer=metadata_service.CreateArtifactRequest.serialize, - response_deserializer=gca_artifact.Artifact.deserialize, - ) - return self._stubs['create_artifact'] - - @property - def get_artifact(self) -> Callable[ - [metadata_service.GetArtifactRequest], - artifact.Artifact]: - r"""Return a callable for the get artifact method over gRPC. - - Retrieves a specific Artifact. - - Returns: - Callable[[~.GetArtifactRequest], - ~.Artifact]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_artifact' not in self._stubs: - self._stubs['get_artifact'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/GetArtifact', - request_serializer=metadata_service.GetArtifactRequest.serialize, - response_deserializer=artifact.Artifact.deserialize, - ) - return self._stubs['get_artifact'] - - @property - def list_artifacts(self) -> Callable[ - [metadata_service.ListArtifactsRequest], - metadata_service.ListArtifactsResponse]: - r"""Return a callable for the list artifacts method over gRPC. - - Lists Artifacts in the MetadataStore. - - Returns: - Callable[[~.ListArtifactsRequest], - ~.ListArtifactsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_artifacts' not in self._stubs: - self._stubs['list_artifacts'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/ListArtifacts', - request_serializer=metadata_service.ListArtifactsRequest.serialize, - response_deserializer=metadata_service.ListArtifactsResponse.deserialize, - ) - return self._stubs['list_artifacts'] - - @property - def update_artifact(self) -> Callable[ - [metadata_service.UpdateArtifactRequest], - gca_artifact.Artifact]: - r"""Return a callable for the update artifact method over gRPC. - - Updates a stored Artifact. - - Returns: - Callable[[~.UpdateArtifactRequest], - ~.Artifact]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_artifact' not in self._stubs: - self._stubs['update_artifact'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/UpdateArtifact', - request_serializer=metadata_service.UpdateArtifactRequest.serialize, - response_deserializer=gca_artifact.Artifact.deserialize, - ) - return self._stubs['update_artifact'] - - @property - def delete_artifact(self) -> Callable[ - [metadata_service.DeleteArtifactRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete artifact method over gRPC. - - Deletes an Artifact. - - Returns: - Callable[[~.DeleteArtifactRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_artifact' not in self._stubs: - self._stubs['delete_artifact'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/DeleteArtifact', - request_serializer=metadata_service.DeleteArtifactRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_artifact'] - - @property - def purge_artifacts(self) -> Callable[ - [metadata_service.PurgeArtifactsRequest], - operations_pb2.Operation]: - r"""Return a callable for the purge artifacts method over gRPC. - - Purges Artifacts. - - Returns: - Callable[[~.PurgeArtifactsRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'purge_artifacts' not in self._stubs: - self._stubs['purge_artifacts'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/PurgeArtifacts', - request_serializer=metadata_service.PurgeArtifactsRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['purge_artifacts'] - - @property - def create_context(self) -> Callable[ - [metadata_service.CreateContextRequest], - gca_context.Context]: - r"""Return a callable for the create context method over gRPC. - - Creates a Context associated with a MetadataStore. - - Returns: - Callable[[~.CreateContextRequest], - ~.Context]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_context' not in self._stubs: - self._stubs['create_context'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/CreateContext', - request_serializer=metadata_service.CreateContextRequest.serialize, - response_deserializer=gca_context.Context.deserialize, - ) - return self._stubs['create_context'] - - @property - def get_context(self) -> Callable[ - [metadata_service.GetContextRequest], - context.Context]: - r"""Return a callable for the get context method over gRPC. - - Retrieves a specific Context. - - Returns: - Callable[[~.GetContextRequest], - ~.Context]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_context' not in self._stubs: - self._stubs['get_context'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/GetContext', - request_serializer=metadata_service.GetContextRequest.serialize, - response_deserializer=context.Context.deserialize, - ) - return self._stubs['get_context'] - - @property - def list_contexts(self) -> Callable[ - [metadata_service.ListContextsRequest], - metadata_service.ListContextsResponse]: - r"""Return a callable for the list contexts method over gRPC. - - Lists Contexts on the MetadataStore. - - Returns: - Callable[[~.ListContextsRequest], - ~.ListContextsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_contexts' not in self._stubs: - self._stubs['list_contexts'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/ListContexts', - request_serializer=metadata_service.ListContextsRequest.serialize, - response_deserializer=metadata_service.ListContextsResponse.deserialize, - ) - return self._stubs['list_contexts'] - - @property - def update_context(self) -> Callable[ - [metadata_service.UpdateContextRequest], - gca_context.Context]: - r"""Return a callable for the update context method over gRPC. - - Updates a stored Context. - - Returns: - Callable[[~.UpdateContextRequest], - ~.Context]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_context' not in self._stubs: - self._stubs['update_context'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/UpdateContext', - request_serializer=metadata_service.UpdateContextRequest.serialize, - response_deserializer=gca_context.Context.deserialize, - ) - return self._stubs['update_context'] - - @property - def delete_context(self) -> Callable[ - [metadata_service.DeleteContextRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete context method over gRPC. - - Deletes a stored Context. - - Returns: - Callable[[~.DeleteContextRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_context' not in self._stubs: - self._stubs['delete_context'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/DeleteContext', - request_serializer=metadata_service.DeleteContextRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_context'] - - @property - def purge_contexts(self) -> Callable[ - [metadata_service.PurgeContextsRequest], - operations_pb2.Operation]: - r"""Return a callable for the purge contexts method over gRPC. - - Purges Contexts. - - Returns: - Callable[[~.PurgeContextsRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'purge_contexts' not in self._stubs: - self._stubs['purge_contexts'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/PurgeContexts', - request_serializer=metadata_service.PurgeContextsRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['purge_contexts'] - - @property - def add_context_artifacts_and_executions(self) -> Callable[ - [metadata_service.AddContextArtifactsAndExecutionsRequest], - metadata_service.AddContextArtifactsAndExecutionsResponse]: - r"""Return a callable for the add context artifacts and - executions method over gRPC. - - Adds a set of Artifacts and Executions to a Context. - If any of the Artifacts or Executions have already been - added to a Context, they are simply skipped. - - Returns: - Callable[[~.AddContextArtifactsAndExecutionsRequest], - ~.AddContextArtifactsAndExecutionsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'add_context_artifacts_and_executions' not in self._stubs: - self._stubs['add_context_artifacts_and_executions'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/AddContextArtifactsAndExecutions', - request_serializer=metadata_service.AddContextArtifactsAndExecutionsRequest.serialize, - response_deserializer=metadata_service.AddContextArtifactsAndExecutionsResponse.deserialize, - ) - return self._stubs['add_context_artifacts_and_executions'] - - @property - def add_context_children(self) -> Callable[ - [metadata_service.AddContextChildrenRequest], - metadata_service.AddContextChildrenResponse]: - r"""Return a callable for the add context children method over gRPC. - - Adds a set of Contexts as children to a parent Context. 
If any - of the child Contexts have already been added to the parent - Context, they are simply skipped. If this call would create a - cycle or cause any Context to have more than 10 parents, the - request will fail with an INVALID_ARGUMENT error. - - Returns: - Callable[[~.AddContextChildrenRequest], - ~.AddContextChildrenResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'add_context_children' not in self._stubs: - self._stubs['add_context_children'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/AddContextChildren', - request_serializer=metadata_service.AddContextChildrenRequest.serialize, - response_deserializer=metadata_service.AddContextChildrenResponse.deserialize, - ) - return self._stubs['add_context_children'] - - @property - def query_context_lineage_subgraph(self) -> Callable[ - [metadata_service.QueryContextLineageSubgraphRequest], - lineage_subgraph.LineageSubgraph]: - r"""Return a callable for the query context lineage subgraph method over gRPC. - - Retrieves Artifacts and Executions within the - specified Context, connected by Event edges and returned - as a LineageSubgraph. - - Returns: - Callable[[~.QueryContextLineageSubgraphRequest], - ~.LineageSubgraph]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'query_context_lineage_subgraph' not in self._stubs: - self._stubs['query_context_lineage_subgraph'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/QueryContextLineageSubgraph', - request_serializer=metadata_service.QueryContextLineageSubgraphRequest.serialize, - response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, - ) - return self._stubs['query_context_lineage_subgraph'] - - @property - def create_execution(self) -> Callable[ - [metadata_service.CreateExecutionRequest], - gca_execution.Execution]: - r"""Return a callable for the create execution method over gRPC. - - Creates an Execution associated with a MetadataStore. - - Returns: - Callable[[~.CreateExecutionRequest], - ~.Execution]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_execution' not in self._stubs: - self._stubs['create_execution'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/CreateExecution', - request_serializer=metadata_service.CreateExecutionRequest.serialize, - response_deserializer=gca_execution.Execution.deserialize, - ) - return self._stubs['create_execution'] - - @property - def get_execution(self) -> Callable[ - [metadata_service.GetExecutionRequest], - execution.Execution]: - r"""Return a callable for the get execution method over gRPC. - - Retrieves a specific Execution. - - Returns: - Callable[[~.GetExecutionRequest], - ~.Execution]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_execution' not in self._stubs: - self._stubs['get_execution'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/GetExecution', - request_serializer=metadata_service.GetExecutionRequest.serialize, - response_deserializer=execution.Execution.deserialize, - ) - return self._stubs['get_execution'] - - @property - def list_executions(self) -> Callable[ - [metadata_service.ListExecutionsRequest], - metadata_service.ListExecutionsResponse]: - r"""Return a callable for the list executions method over gRPC. - - Lists Executions in the MetadataStore. - - Returns: - Callable[[~.ListExecutionsRequest], - ~.ListExecutionsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_executions' not in self._stubs: - self._stubs['list_executions'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/ListExecutions', - request_serializer=metadata_service.ListExecutionsRequest.serialize, - response_deserializer=metadata_service.ListExecutionsResponse.deserialize, - ) - return self._stubs['list_executions'] - - @property - def update_execution(self) -> Callable[ - [metadata_service.UpdateExecutionRequest], - gca_execution.Execution]: - r"""Return a callable for the update execution method over gRPC. - - Updates a stored Execution. - - Returns: - Callable[[~.UpdateExecutionRequest], - ~.Execution]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_execution' not in self._stubs: - self._stubs['update_execution'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/UpdateExecution', - request_serializer=metadata_service.UpdateExecutionRequest.serialize, - response_deserializer=gca_execution.Execution.deserialize, - ) - return self._stubs['update_execution'] - - @property - def delete_execution(self) -> Callable[ - [metadata_service.DeleteExecutionRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete execution method over gRPC. - - Deletes an Execution. - - Returns: - Callable[[~.DeleteExecutionRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_execution' not in self._stubs: - self._stubs['delete_execution'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/DeleteExecution', - request_serializer=metadata_service.DeleteExecutionRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_execution'] - - @property - def purge_executions(self) -> Callable[ - [metadata_service.PurgeExecutionsRequest], - operations_pb2.Operation]: - r"""Return a callable for the purge executions method over gRPC. - - Purges Executions. - - Returns: - Callable[[~.PurgeExecutionsRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'purge_executions' not in self._stubs: - self._stubs['purge_executions'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/PurgeExecutions', - request_serializer=metadata_service.PurgeExecutionsRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['purge_executions'] - - @property - def add_execution_events(self) -> Callable[ - [metadata_service.AddExecutionEventsRequest], - metadata_service.AddExecutionEventsResponse]: - r"""Return a callable for the add execution events method over gRPC. - - Adds Events to the specified Execution. An Event - indicates whether an Artifact was used as an input or - output for an Execution. If an Event already exists - between the Execution and the Artifact, the Event is - skipped. - - Returns: - Callable[[~.AddExecutionEventsRequest], - ~.AddExecutionEventsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'add_execution_events' not in self._stubs: - self._stubs['add_execution_events'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/AddExecutionEvents', - request_serializer=metadata_service.AddExecutionEventsRequest.serialize, - response_deserializer=metadata_service.AddExecutionEventsResponse.deserialize, - ) - return self._stubs['add_execution_events'] - - @property - def query_execution_inputs_and_outputs(self) -> Callable[ - [metadata_service.QueryExecutionInputsAndOutputsRequest], - lineage_subgraph.LineageSubgraph]: - r"""Return a callable for the query execution inputs and - outputs method over gRPC. - - Obtains the set of input and output Artifacts for - this Execution, in the form of LineageSubgraph that also - contains the Execution and connecting Events. 
- - Returns: - Callable[[~.QueryExecutionInputsAndOutputsRequest], - ~.LineageSubgraph]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'query_execution_inputs_and_outputs' not in self._stubs: - self._stubs['query_execution_inputs_and_outputs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/QueryExecutionInputsAndOutputs', - request_serializer=metadata_service.QueryExecutionInputsAndOutputsRequest.serialize, - response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, - ) - return self._stubs['query_execution_inputs_and_outputs'] - - @property - def create_metadata_schema(self) -> Callable[ - [metadata_service.CreateMetadataSchemaRequest], - gca_metadata_schema.MetadataSchema]: - r"""Return a callable for the create metadata schema method over gRPC. - - Creates a MetadataSchema. - - Returns: - Callable[[~.CreateMetadataSchemaRequest], - ~.MetadataSchema]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_metadata_schema' not in self._stubs: - self._stubs['create_metadata_schema'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/CreateMetadataSchema', - request_serializer=metadata_service.CreateMetadataSchemaRequest.serialize, - response_deserializer=gca_metadata_schema.MetadataSchema.deserialize, - ) - return self._stubs['create_metadata_schema'] - - @property - def get_metadata_schema(self) -> Callable[ - [metadata_service.GetMetadataSchemaRequest], - metadata_schema.MetadataSchema]: - r"""Return a callable for the get metadata schema method over gRPC. - - Retrieves a specific MetadataSchema. - - Returns: - Callable[[~.GetMetadataSchemaRequest], - ~.MetadataSchema]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_metadata_schema' not in self._stubs: - self._stubs['get_metadata_schema'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/GetMetadataSchema', - request_serializer=metadata_service.GetMetadataSchemaRequest.serialize, - response_deserializer=metadata_schema.MetadataSchema.deserialize, - ) - return self._stubs['get_metadata_schema'] - - @property - def list_metadata_schemas(self) -> Callable[ - [metadata_service.ListMetadataSchemasRequest], - metadata_service.ListMetadataSchemasResponse]: - r"""Return a callable for the list metadata schemas method over gRPC. - - Lists MetadataSchemas. - - Returns: - Callable[[~.ListMetadataSchemasRequest], - ~.ListMetadataSchemasResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_metadata_schemas' not in self._stubs: - self._stubs['list_metadata_schemas'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/ListMetadataSchemas', - request_serializer=metadata_service.ListMetadataSchemasRequest.serialize, - response_deserializer=metadata_service.ListMetadataSchemasResponse.deserialize, - ) - return self._stubs['list_metadata_schemas'] - - @property - def query_artifact_lineage_subgraph(self) -> Callable[ - [metadata_service.QueryArtifactLineageSubgraphRequest], - lineage_subgraph.LineageSubgraph]: - r"""Return a callable for the query artifact lineage - subgraph method over gRPC. - - Retrieves lineage of an Artifact represented through - Artifacts and Executions connected by Event edges and - returned as a LineageSubgraph. - - Returns: - Callable[[~.QueryArtifactLineageSubgraphRequest], - ~.LineageSubgraph]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'query_artifact_lineage_subgraph' not in self._stubs: - self._stubs['query_artifact_lineage_subgraph'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/QueryArtifactLineageSubgraph', - request_serializer=metadata_service.QueryArtifactLineageSubgraphRequest.serialize, - response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, - ) - return self._stubs['query_artifact_lineage_subgraph'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'MetadataServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc_asyncio.py deleted file mode 100644 index 26863c47dd..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/metadata_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,1088 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1.types import artifact -from google.cloud.aiplatform_v1.types import artifact as gca_artifact -from google.cloud.aiplatform_v1.types import context -from google.cloud.aiplatform_v1.types import context as gca_context -from google.cloud.aiplatform_v1.types import execution -from google.cloud.aiplatform_v1.types import execution as gca_execution -from google.cloud.aiplatform_v1.types import lineage_subgraph -from google.cloud.aiplatform_v1.types import metadata_schema -from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema -from google.cloud.aiplatform_v1.types import metadata_service -from google.cloud.aiplatform_v1.types import metadata_store -from google.longrunning import operations_pb2 # type: ignore -from .base import MetadataServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import MetadataServiceGrpcTransport - - -class MetadataServiceGrpcAsyncIOTransport(MetadataServiceTransport): - """gRPC AsyncIO backend transport for MetadataService. - - Service for reading and writing metadata entries. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. 
- """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
- If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. 
This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_metadata_store(self) -> Callable[ - [metadata_service.CreateMetadataStoreRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create metadata store method over gRPC. - - Initializes a MetadataStore, including allocation of - resources. - - Returns: - Callable[[~.CreateMetadataStoreRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_metadata_store' not in self._stubs: - self._stubs['create_metadata_store'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/CreateMetadataStore', - request_serializer=metadata_service.CreateMetadataStoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_metadata_store'] - - @property - def get_metadata_store(self) -> Callable[ - [metadata_service.GetMetadataStoreRequest], - Awaitable[metadata_store.MetadataStore]]: - r"""Return a callable for the get metadata store method over gRPC. - - Retrieves a specific MetadataStore. - - Returns: - Callable[[~.GetMetadataStoreRequest], - Awaitable[~.MetadataStore]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_metadata_store' not in self._stubs: - self._stubs['get_metadata_store'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/GetMetadataStore', - request_serializer=metadata_service.GetMetadataStoreRequest.serialize, - response_deserializer=metadata_store.MetadataStore.deserialize, - ) - return self._stubs['get_metadata_store'] - - @property - def list_metadata_stores(self) -> Callable[ - [metadata_service.ListMetadataStoresRequest], - Awaitable[metadata_service.ListMetadataStoresResponse]]: - r"""Return a callable for the list metadata stores method over gRPC. - - Lists MetadataStores for a Location. - - Returns: - Callable[[~.ListMetadataStoresRequest], - Awaitable[~.ListMetadataStoresResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_metadata_stores' not in self._stubs: - self._stubs['list_metadata_stores'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/ListMetadataStores', - request_serializer=metadata_service.ListMetadataStoresRequest.serialize, - response_deserializer=metadata_service.ListMetadataStoresResponse.deserialize, - ) - return self._stubs['list_metadata_stores'] - - @property - def delete_metadata_store(self) -> Callable[ - [metadata_service.DeleteMetadataStoreRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete metadata store method over gRPC. - - Deletes a single MetadataStore and all its child - resources (Artifacts, Executions, and Contexts). - - Returns: - Callable[[~.DeleteMetadataStoreRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_metadata_store' not in self._stubs: - self._stubs['delete_metadata_store'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/DeleteMetadataStore', - request_serializer=metadata_service.DeleteMetadataStoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_metadata_store'] - - @property - def create_artifact(self) -> Callable[ - [metadata_service.CreateArtifactRequest], - Awaitable[gca_artifact.Artifact]]: - r"""Return a callable for the create artifact method over gRPC. - - Creates an Artifact associated with a MetadataStore. - - Returns: - Callable[[~.CreateArtifactRequest], - Awaitable[~.Artifact]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_artifact' not in self._stubs: - self._stubs['create_artifact'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/CreateArtifact', - request_serializer=metadata_service.CreateArtifactRequest.serialize, - response_deserializer=gca_artifact.Artifact.deserialize, - ) - return self._stubs['create_artifact'] - - @property - def get_artifact(self) -> Callable[ - [metadata_service.GetArtifactRequest], - Awaitable[artifact.Artifact]]: - r"""Return a callable for the get artifact method over gRPC. - - Retrieves a specific Artifact. - - Returns: - Callable[[~.GetArtifactRequest], - Awaitable[~.Artifact]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_artifact' not in self._stubs: - self._stubs['get_artifact'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/GetArtifact', - request_serializer=metadata_service.GetArtifactRequest.serialize, - response_deserializer=artifact.Artifact.deserialize, - ) - return self._stubs['get_artifact'] - - @property - def list_artifacts(self) -> Callable[ - [metadata_service.ListArtifactsRequest], - Awaitable[metadata_service.ListArtifactsResponse]]: - r"""Return a callable for the list artifacts method over gRPC. - - Lists Artifacts in the MetadataStore. - - Returns: - Callable[[~.ListArtifactsRequest], - Awaitable[~.ListArtifactsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_artifacts' not in self._stubs: - self._stubs['list_artifacts'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/ListArtifacts', - request_serializer=metadata_service.ListArtifactsRequest.serialize, - response_deserializer=metadata_service.ListArtifactsResponse.deserialize, - ) - return self._stubs['list_artifacts'] - - @property - def update_artifact(self) -> Callable[ - [metadata_service.UpdateArtifactRequest], - Awaitable[gca_artifact.Artifact]]: - r"""Return a callable for the update artifact method over gRPC. - - Updates a stored Artifact. - - Returns: - Callable[[~.UpdateArtifactRequest], - Awaitable[~.Artifact]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_artifact' not in self._stubs: - self._stubs['update_artifact'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/UpdateArtifact', - request_serializer=metadata_service.UpdateArtifactRequest.serialize, - response_deserializer=gca_artifact.Artifact.deserialize, - ) - return self._stubs['update_artifact'] - - @property - def delete_artifact(self) -> Callable[ - [metadata_service.DeleteArtifactRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete artifact method over gRPC. - - Deletes an Artifact. - - Returns: - Callable[[~.DeleteArtifactRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_artifact' not in self._stubs: - self._stubs['delete_artifact'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/DeleteArtifact', - request_serializer=metadata_service.DeleteArtifactRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_artifact'] - - @property - def purge_artifacts(self) -> Callable[ - [metadata_service.PurgeArtifactsRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the purge artifacts method over gRPC. - - Purges Artifacts. - - Returns: - Callable[[~.PurgeArtifactsRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'purge_artifacts' not in self._stubs: - self._stubs['purge_artifacts'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/PurgeArtifacts', - request_serializer=metadata_service.PurgeArtifactsRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['purge_artifacts'] - - @property - def create_context(self) -> Callable[ - [metadata_service.CreateContextRequest], - Awaitable[gca_context.Context]]: - r"""Return a callable for the create context method over gRPC. - - Creates a Context associated with a MetadataStore. - - Returns: - Callable[[~.CreateContextRequest], - Awaitable[~.Context]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_context' not in self._stubs: - self._stubs['create_context'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/CreateContext', - request_serializer=metadata_service.CreateContextRequest.serialize, - response_deserializer=gca_context.Context.deserialize, - ) - return self._stubs['create_context'] - - @property - def get_context(self) -> Callable[ - [metadata_service.GetContextRequest], - Awaitable[context.Context]]: - r"""Return a callable for the get context method over gRPC. - - Retrieves a specific Context. - - Returns: - Callable[[~.GetContextRequest], - Awaitable[~.Context]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_context' not in self._stubs: - self._stubs['get_context'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/GetContext', - request_serializer=metadata_service.GetContextRequest.serialize, - response_deserializer=context.Context.deserialize, - ) - return self._stubs['get_context'] - - @property - def list_contexts(self) -> Callable[ - [metadata_service.ListContextsRequest], - Awaitable[metadata_service.ListContextsResponse]]: - r"""Return a callable for the list contexts method over gRPC. - - Lists Contexts on the MetadataStore. - - Returns: - Callable[[~.ListContextsRequest], - Awaitable[~.ListContextsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_contexts' not in self._stubs: - self._stubs['list_contexts'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/ListContexts', - request_serializer=metadata_service.ListContextsRequest.serialize, - response_deserializer=metadata_service.ListContextsResponse.deserialize, - ) - return self._stubs['list_contexts'] - - @property - def update_context(self) -> Callable[ - [metadata_service.UpdateContextRequest], - Awaitable[gca_context.Context]]: - r"""Return a callable for the update context method over gRPC. - - Updates a stored Context. - - Returns: - Callable[[~.UpdateContextRequest], - Awaitable[~.Context]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_context' not in self._stubs: - self._stubs['update_context'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/UpdateContext', - request_serializer=metadata_service.UpdateContextRequest.serialize, - response_deserializer=gca_context.Context.deserialize, - ) - return self._stubs['update_context'] - - @property - def delete_context(self) -> Callable[ - [metadata_service.DeleteContextRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete context method over gRPC. - - Deletes a stored Context. - - Returns: - Callable[[~.DeleteContextRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_context' not in self._stubs: - self._stubs['delete_context'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/DeleteContext', - request_serializer=metadata_service.DeleteContextRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_context'] - - @property - def purge_contexts(self) -> Callable[ - [metadata_service.PurgeContextsRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the purge contexts method over gRPC. - - Purges Contexts. - - Returns: - Callable[[~.PurgeContextsRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'purge_contexts' not in self._stubs: - self._stubs['purge_contexts'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/PurgeContexts', - request_serializer=metadata_service.PurgeContextsRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['purge_contexts'] - - @property - def add_context_artifacts_and_executions(self) -> Callable[ - [metadata_service.AddContextArtifactsAndExecutionsRequest], - Awaitable[metadata_service.AddContextArtifactsAndExecutionsResponse]]: - r"""Return a callable for the add context artifacts and - executions method over gRPC. - - Adds a set of Artifacts and Executions to a Context. - If any of the Artifacts or Executions have already been - added to a Context, they are simply skipped. - - Returns: - Callable[[~.AddContextArtifactsAndExecutionsRequest], - Awaitable[~.AddContextArtifactsAndExecutionsResponse]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'add_context_artifacts_and_executions' not in self._stubs: - self._stubs['add_context_artifacts_and_executions'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/AddContextArtifactsAndExecutions', - request_serializer=metadata_service.AddContextArtifactsAndExecutionsRequest.serialize, - response_deserializer=metadata_service.AddContextArtifactsAndExecutionsResponse.deserialize, - ) - return self._stubs['add_context_artifacts_and_executions'] - - @property - def add_context_children(self) -> Callable[ - [metadata_service.AddContextChildrenRequest], - Awaitable[metadata_service.AddContextChildrenResponse]]: - r"""Return a callable for the add context children method over gRPC. - - Adds a set of Contexts as children to a parent Context. If any - of the child Contexts have already been added to the parent - Context, they are simply skipped. If this call would create a - cycle or cause any Context to have more than 10 parents, the - request will fail with an INVALID_ARGUMENT error. - - Returns: - Callable[[~.AddContextChildrenRequest], - Awaitable[~.AddContextChildrenResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'add_context_children' not in self._stubs: - self._stubs['add_context_children'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/AddContextChildren', - request_serializer=metadata_service.AddContextChildrenRequest.serialize, - response_deserializer=metadata_service.AddContextChildrenResponse.deserialize, - ) - return self._stubs['add_context_children'] - - @property - def query_context_lineage_subgraph(self) -> Callable[ - [metadata_service.QueryContextLineageSubgraphRequest], - Awaitable[lineage_subgraph.LineageSubgraph]]: - r"""Return a callable for the query context lineage subgraph method over gRPC. - - Retrieves Artifacts and Executions within the - specified Context, connected by Event edges and returned - as a LineageSubgraph. - - Returns: - Callable[[~.QueryContextLineageSubgraphRequest], - Awaitable[~.LineageSubgraph]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'query_context_lineage_subgraph' not in self._stubs: - self._stubs['query_context_lineage_subgraph'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/QueryContextLineageSubgraph', - request_serializer=metadata_service.QueryContextLineageSubgraphRequest.serialize, - response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, - ) - return self._stubs['query_context_lineage_subgraph'] - - @property - def create_execution(self) -> Callable[ - [metadata_service.CreateExecutionRequest], - Awaitable[gca_execution.Execution]]: - r"""Return a callable for the create execution method over gRPC. - - Creates an Execution associated with a MetadataStore. 
- - Returns: - Callable[[~.CreateExecutionRequest], - Awaitable[~.Execution]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_execution' not in self._stubs: - self._stubs['create_execution'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/CreateExecution', - request_serializer=metadata_service.CreateExecutionRequest.serialize, - response_deserializer=gca_execution.Execution.deserialize, - ) - return self._stubs['create_execution'] - - @property - def get_execution(self) -> Callable[ - [metadata_service.GetExecutionRequest], - Awaitable[execution.Execution]]: - r"""Return a callable for the get execution method over gRPC. - - Retrieves a specific Execution. - - Returns: - Callable[[~.GetExecutionRequest], - Awaitable[~.Execution]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_execution' not in self._stubs: - self._stubs['get_execution'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/GetExecution', - request_serializer=metadata_service.GetExecutionRequest.serialize, - response_deserializer=execution.Execution.deserialize, - ) - return self._stubs['get_execution'] - - @property - def list_executions(self) -> Callable[ - [metadata_service.ListExecutionsRequest], - Awaitable[metadata_service.ListExecutionsResponse]]: - r"""Return a callable for the list executions method over gRPC. - - Lists Executions in the MetadataStore. 
- - Returns: - Callable[[~.ListExecutionsRequest], - Awaitable[~.ListExecutionsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_executions' not in self._stubs: - self._stubs['list_executions'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/ListExecutions', - request_serializer=metadata_service.ListExecutionsRequest.serialize, - response_deserializer=metadata_service.ListExecutionsResponse.deserialize, - ) - return self._stubs['list_executions'] - - @property - def update_execution(self) -> Callable[ - [metadata_service.UpdateExecutionRequest], - Awaitable[gca_execution.Execution]]: - r"""Return a callable for the update execution method over gRPC. - - Updates a stored Execution. - - Returns: - Callable[[~.UpdateExecutionRequest], - Awaitable[~.Execution]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_execution' not in self._stubs: - self._stubs['update_execution'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/UpdateExecution', - request_serializer=metadata_service.UpdateExecutionRequest.serialize, - response_deserializer=gca_execution.Execution.deserialize, - ) - return self._stubs['update_execution'] - - @property - def delete_execution(self) -> Callable[ - [metadata_service.DeleteExecutionRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete execution method over gRPC. - - Deletes an Execution. 
- - Returns: - Callable[[~.DeleteExecutionRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_execution' not in self._stubs: - self._stubs['delete_execution'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/DeleteExecution', - request_serializer=metadata_service.DeleteExecutionRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_execution'] - - @property - def purge_executions(self) -> Callable[ - [metadata_service.PurgeExecutionsRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the purge executions method over gRPC. - - Purges Executions. - - Returns: - Callable[[~.PurgeExecutionsRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'purge_executions' not in self._stubs: - self._stubs['purge_executions'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/PurgeExecutions', - request_serializer=metadata_service.PurgeExecutionsRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['purge_executions'] - - @property - def add_execution_events(self) -> Callable[ - [metadata_service.AddExecutionEventsRequest], - Awaitable[metadata_service.AddExecutionEventsResponse]]: - r"""Return a callable for the add execution events method over gRPC. - - Adds Events to the specified Execution. 
An Event - indicates whether an Artifact was used as an input or - output for an Execution. If an Event already exists - between the Execution and the Artifact, the Event is - skipped. - - Returns: - Callable[[~.AddExecutionEventsRequest], - Awaitable[~.AddExecutionEventsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'add_execution_events' not in self._stubs: - self._stubs['add_execution_events'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/AddExecutionEvents', - request_serializer=metadata_service.AddExecutionEventsRequest.serialize, - response_deserializer=metadata_service.AddExecutionEventsResponse.deserialize, - ) - return self._stubs['add_execution_events'] - - @property - def query_execution_inputs_and_outputs(self) -> Callable[ - [metadata_service.QueryExecutionInputsAndOutputsRequest], - Awaitable[lineage_subgraph.LineageSubgraph]]: - r"""Return a callable for the query execution inputs and - outputs method over gRPC. - - Obtains the set of input and output Artifacts for - this Execution, in the form of LineageSubgraph that also - contains the Execution and connecting Events. - - Returns: - Callable[[~.QueryExecutionInputsAndOutputsRequest], - Awaitable[~.LineageSubgraph]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'query_execution_inputs_and_outputs' not in self._stubs: - self._stubs['query_execution_inputs_and_outputs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/QueryExecutionInputsAndOutputs', - request_serializer=metadata_service.QueryExecutionInputsAndOutputsRequest.serialize, - response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, - ) - return self._stubs['query_execution_inputs_and_outputs'] - - @property - def create_metadata_schema(self) -> Callable[ - [metadata_service.CreateMetadataSchemaRequest], - Awaitable[gca_metadata_schema.MetadataSchema]]: - r"""Return a callable for the create metadata schema method over gRPC. - - Creates a MetadataSchema. - - Returns: - Callable[[~.CreateMetadataSchemaRequest], - Awaitable[~.MetadataSchema]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_metadata_schema' not in self._stubs: - self._stubs['create_metadata_schema'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/CreateMetadataSchema', - request_serializer=metadata_service.CreateMetadataSchemaRequest.serialize, - response_deserializer=gca_metadata_schema.MetadataSchema.deserialize, - ) - return self._stubs['create_metadata_schema'] - - @property - def get_metadata_schema(self) -> Callable[ - [metadata_service.GetMetadataSchemaRequest], - Awaitable[metadata_schema.MetadataSchema]]: - r"""Return a callable for the get metadata schema method over gRPC. - - Retrieves a specific MetadataSchema. - - Returns: - Callable[[~.GetMetadataSchemaRequest], - Awaitable[~.MetadataSchema]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_metadata_schema' not in self._stubs: - self._stubs['get_metadata_schema'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/GetMetadataSchema', - request_serializer=metadata_service.GetMetadataSchemaRequest.serialize, - response_deserializer=metadata_schema.MetadataSchema.deserialize, - ) - return self._stubs['get_metadata_schema'] - - @property - def list_metadata_schemas(self) -> Callable[ - [metadata_service.ListMetadataSchemasRequest], - Awaitable[metadata_service.ListMetadataSchemasResponse]]: - r"""Return a callable for the list metadata schemas method over gRPC. - - Lists MetadataSchemas. - - Returns: - Callable[[~.ListMetadataSchemasRequest], - Awaitable[~.ListMetadataSchemasResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_metadata_schemas' not in self._stubs: - self._stubs['list_metadata_schemas'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/ListMetadataSchemas', - request_serializer=metadata_service.ListMetadataSchemasRequest.serialize, - response_deserializer=metadata_service.ListMetadataSchemasResponse.deserialize, - ) - return self._stubs['list_metadata_schemas'] - - @property - def query_artifact_lineage_subgraph(self) -> Callable[ - [metadata_service.QueryArtifactLineageSubgraphRequest], - Awaitable[lineage_subgraph.LineageSubgraph]]: - r"""Return a callable for the query artifact lineage - subgraph method over gRPC. - - Retrieves lineage of an Artifact represented through - Artifacts and Executions connected by Event edges and - returned as a LineageSubgraph. 
- - Returns: - Callable[[~.QueryArtifactLineageSubgraphRequest], - Awaitable[~.LineageSubgraph]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'query_artifact_lineage_subgraph' not in self._stubs: - self._stubs['query_artifact_lineage_subgraph'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MetadataService/QueryArtifactLineageSubgraph', - request_serializer=metadata_service.QueryArtifactLineageSubgraphRequest.serialize, - response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, - ) - return self._stubs['query_artifact_lineage_subgraph'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'MetadataServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/__init__.py deleted file mode 100644 index b32b10b1d7..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import MigrationServiceClient -from .async_client import MigrationServiceAsyncClient - -__all__ = ( - 'MigrationServiceClient', - 'MigrationServiceAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/async_client.py deleted file mode 100644 index 88490a92e5..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/async_client.py +++ /dev/null @@ -1,383 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.migration_service import pagers -from google.cloud.aiplatform_v1.types import migratable_resource -from google.cloud.aiplatform_v1.types import migration_service -from .transports.base import MigrationServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import MigrationServiceGrpcAsyncIOTransport -from .client import MigrationServiceClient - - -class MigrationServiceAsyncClient: - """A service that migrates resources from automl.googleapis.com, - datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. 
- """ - - _client: MigrationServiceClient - - DEFAULT_ENDPOINT = MigrationServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = MigrationServiceClient.DEFAULT_MTLS_ENDPOINT - - annotated_dataset_path = staticmethod(MigrationServiceClient.annotated_dataset_path) - parse_annotated_dataset_path = staticmethod(MigrationServiceClient.parse_annotated_dataset_path) - dataset_path = staticmethod(MigrationServiceClient.dataset_path) - parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path) - dataset_path = staticmethod(MigrationServiceClient.dataset_path) - parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path) - dataset_path = staticmethod(MigrationServiceClient.dataset_path) - parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path) - model_path = staticmethod(MigrationServiceClient.model_path) - parse_model_path = staticmethod(MigrationServiceClient.parse_model_path) - model_path = staticmethod(MigrationServiceClient.model_path) - parse_model_path = staticmethod(MigrationServiceClient.parse_model_path) - version_path = staticmethod(MigrationServiceClient.version_path) - parse_version_path = staticmethod(MigrationServiceClient.parse_version_path) - common_billing_account_path = staticmethod(MigrationServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(MigrationServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(MigrationServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(MigrationServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(MigrationServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(MigrationServiceClient.parse_common_organization_path) - common_project_path = staticmethod(MigrationServiceClient.common_project_path) - parse_common_project_path = staticmethod(MigrationServiceClient.parse_common_project_path) - common_location_path = 
staticmethod(MigrationServiceClient.common_location_path) - parse_common_location_path = staticmethod(MigrationServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - MigrationServiceAsyncClient: The constructed client. - """ - return MigrationServiceClient.from_service_account_info.__func__(MigrationServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - MigrationServiceAsyncClient: The constructed client. - """ - return MigrationServiceClient.from_service_account_file.__func__(MigrationServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> MigrationServiceTransport: - """Returns the transport used by the client instance. - - Returns: - MigrationServiceTransport: The transport used by the client instance. 
- """ - return self._client.transport - - get_transport_class = functools.partial(type(MigrationServiceClient).get_transport_class, type(MigrationServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, MigrationServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the migration service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.MigrationServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. 
- - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = MigrationServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def search_migratable_resources(self, - request: Union[migration_service.SearchMigratableResourcesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.SearchMigratableResourcesAsyncPager: - r"""Searches all of the resources in - automl.googleapis.com, datalabeling.googleapis.com and - ml.googleapis.com that can be migrated to Vertex AI's - given location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest, dict]): - The request object. Request message for - [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources]. - parent (:class:`str`): - Required. The location that the migratable resources - should be searched from. It's the Vertex AI location - that the resources can be migrated to, not the - resources' original location. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.migration_service.pagers.SearchMigratableResourcesAsyncPager: - Response message for - [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources]. 
- - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = migration_service.SearchMigratableResourcesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.search_migratable_resources, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.SearchMigratableResourcesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def batch_migrate_resources(self, - request: Union[migration_service.BatchMigrateResourcesRequest, dict] = None, - *, - parent: str = None, - migrate_resource_requests: Sequence[migration_service.MigrateResourceRequest] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Batch migrates resources from ml.googleapis.com, - automl.googleapis.com, and datalabeling.googleapis.com - to Vertex AI. - - Args: - request (Union[google.cloud.aiplatform_v1.types.BatchMigrateResourcesRequest, dict]): - The request object. Request message for - [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources]. - parent (:class:`str`): - Required. The location of the migrated resource will - live in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - migrate_resource_requests (:class:`Sequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest]`): - Required. The request messages - specifying the resources to migrate. - They must be in the same location as the - destination. Up to 50 resources can be - migrated in one batch. - - This corresponds to the ``migrate_resource_requests`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. 
- - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.BatchMigrateResourcesResponse` - Response message for - [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, migrate_resource_requests]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = migration_service.BatchMigrateResourcesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if migrate_resource_requests: - request.migrate_resource_requests.extend(migrate_resource_requests) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.batch_migrate_resources, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - migration_service.BatchMigrateResourcesResponse, - metadata_type=migration_service.BatchMigrateResourcesOperationMetadata, - ) - - # Done; return the response. 
- return response - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "MigrationServiceAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/client.py deleted file mode 100644 index 2029981a0e..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/client.py +++ /dev/null @@ -1,635 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources

from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.auth.transport import mtls  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore
from google.auth.exceptions import MutualTLSChannelError  # type: ignore
from google.oauth2 import service_account  # type: ignore

try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore

from google.api_core import operation  # type: ignore
from google.api_core import operation_async  # type: ignore
from google.cloud.aiplatform_v1.services.migration_service import pagers
from google.cloud.aiplatform_v1.types import migratable_resource
from google.cloud.aiplatform_v1.types import migration_service
from .transports.base import MigrationServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import MigrationServiceGrpcTransport
from .transports.grpc_asyncio import MigrationServiceGrpcAsyncIOTransport


class MigrationServiceClientMeta(type):
    """Metaclass for the MigrationService client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """
    _transport_registry = OrderedDict()  # type: Dict[str, Type[MigrationServiceTransport]]
    _transport_registry["grpc"] = MigrationServiceGrpcTransport
    _transport_registry["grpc_asyncio"] = MigrationServiceGrpcAsyncIOTransport

    def get_transport_class(cls,
            label: str = None,
            ) -> Type[MigrationServiceTransport]:
        """Returns an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]

        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))


class MigrationServiceClient(metaclass=MigrationServiceClientMeta):
    """A service that migrates resources from automl.googleapis.com,
    datalabeling.googleapis.com and ml.googleapis.com to Vertex AI.
    """

    @staticmethod
    def _get_default_mtls_endpoint(api_endpoint):
        """Converts api endpoint to mTLS endpoint.

        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
        Args:
            api_endpoint (Optional[str]): the api endpoint to convert.
        Returns:
            str: converted mTLS api endpoint.
        """
        if not api_endpoint:
            return api_endpoint

        # NOTE(review): the named capture groups below had been stripped to
        # invalid "(?P...)" fragments (markup damage); restored here.
        mtls_endpoint_re = re.compile(
            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
        )

        m = mtls_endpoint_re.match(api_endpoint)
        name, mtls, sandbox, googledomain = m.groups()
        if mtls or not googledomain:
            # Already an mTLS endpoint, or not a googleapis.com host at all.
            return api_endpoint

        if sandbox:
            return api_endpoint.replace(
                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
            )

        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")

    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
        DEFAULT_ENDPOINT
    )

    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        info.

        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            MigrationServiceClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_info(info)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)

    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        file.

        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            MigrationServiceClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_file(
            filename)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)

    from_service_account_json = from_service_account_file

    @property
    def transport(self) -> MigrationServiceTransport:
        """Returns the transport used by the client instance.

        Returns:
            MigrationServiceTransport: The transport used by the client
                instance.
        """
        return self._transport

    # NOTE(review): the generated original defined dataset_path /
    # parse_dataset_path three times (with differing templates) and
    # model_path / parse_model_path twice; Python keeps only the last
    # definition of each name, so the shadowed duplicates are removed and
    # the surviving (last) definitions are kept. The stripped "(?P...)"
    # named groups in every parse regex are restored as well.
    @staticmethod
    def annotated_dataset_path(project: str, dataset: str, annotated_dataset: str,) -> str:
        """Returns a fully-qualified annotated_dataset string."""
        return "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}".format(project=project, dataset=dataset, annotated_dataset=annotated_dataset, )

    @staticmethod
    def parse_annotated_dataset_path(path: str) -> Dict[str, str]:
        """Parses a annotated_dataset path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)/datasets/(?P<dataset>.+?)/annotatedDatasets/(?P<annotated_dataset>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def dataset_path(project: str, location: str, dataset: str,) -> str:
        """Returns a fully-qualified dataset string."""
        return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, )

    @staticmethod
    def parse_dataset_path(path: str) -> Dict[str, str]:
        """Parses a dataset path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def model_path(project: str, location: str, model: str,) -> str:
        """Returns a fully-qualified model string."""
        return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, )

    @staticmethod
    def parse_model_path(path: str) -> Dict[str, str]:
        """Parses a model path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def version_path(project: str, model: str, version: str,) -> str:
        """Returns a fully-qualified version string."""
        return "projects/{project}/models/{model}/versions/{version}".format(project=project, model=model, version=version, )

    @staticmethod
    def parse_version_path(path: str) -> Dict[str, str]:
        """Parses a version path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)/models/(?P<model>.+?)/versions/(?P<version>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_billing_account_path(billing_account: str, ) -> str:
        """Returns a fully-qualified billing_account string."""
        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )

    @staticmethod
    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
        """Parse a billing_account path into its component segments."""
        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_folder_path(folder: str, ) -> str:
        """Returns a fully-qualified folder string."""
        return "folders/{folder}".format(folder=folder, )

    @staticmethod
    def parse_common_folder_path(path: str) -> Dict[str, str]:
        """Parse a folder path into its component segments."""
        m = re.match(r"^folders/(?P<folder>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_organization_path(organization: str, ) -> str:
        """Returns a fully-qualified organization string."""
        return "organizations/{organization}".format(organization=organization, )

    @staticmethod
    def parse_common_organization_path(path: str) -> Dict[str, str]:
        """Parse a organization path into its component segments."""
        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_project_path(project: str, ) -> str:
        """Returns a fully-qualified project string."""
        return "projects/{project}".format(project=project, )

    @staticmethod
    def parse_common_project_path(path: str) -> Dict[str, str]:
        """Parse a project path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_location_path(project: str, location: str, ) -> str:
        """Returns a fully-qualified location string."""
        return "projects/{project}/locations/{location}".format(project=project, location=location, )

    @staticmethod
    def parse_common_location_path(path: str) -> Dict[str, str]:
        """Parse a location path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
        return m.groupdict() if m else {}

    def __init__(self, *,
            credentials: Optional[ga_credentials.Credentials] = None,
            transport: Union[str, MigrationServiceTransport, None] = None,
            client_options: Optional[client_options_lib.ClientOptions] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            ) -> None:
        """Instantiates the migration service client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, MigrationServiceTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()

        # Create SSL credentials for mutual TLS if needed.
        if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"):
            raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`")
        use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"

        client_cert_source_func = None
        is_mtls = False
        if use_client_cert:
            if client_options.client_cert_source:
                # An explicit cert source always wins.
                is_mtls = True
                client_cert_source_func = client_options.client_cert_source
            else:
                is_mtls = mtls.has_default_client_cert_source()
                if is_mtls:
                    client_cert_source_func = mtls.default_client_cert_source()
                else:
                    client_cert_source_func = None

        # Figure out which api endpoint to use.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        else:
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
            if use_mtls_env == "never":
                api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
                    "values: never, auto, always"
                )

        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, MigrationServiceTransport):
            # transport is a MigrationServiceTransport instance.
            if credentials or client_options.credentials_file:
                raise ValueError("When providing a transport instance, "
                                 "provide its credentials directly.")
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = transport
        else:
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=True,
            )

    def search_migratable_resources(self,
            request: Union[migration_service.SearchMigratableResourcesRequest, dict] = None,
            *,
            parent: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> pagers.SearchMigratableResourcesPager:
        r"""Searches all of the resources in
        automl.googleapis.com, datalabeling.googleapis.com and
        ml.googleapis.com that can be migrated to Vertex AI's
        given location.

        Args:
            request (Union[google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest, dict]):
                The request object. Request message for
                [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources].
            parent (str):
                Required. The location that the migratable resources
                should be searched from. It's the Vertex AI location
                that the resources can be migrated to, not the
                resources' original location. Format:
                ``projects/{project}/locations/{location}``

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.aiplatform_v1.services.migration_service.pagers.SearchMigratableResourcesPager:
                Response message for
                [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources].

                Iterating over this object will yield results and
                resolve additional pages automatically.

        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a migration_service.SearchMigratableResourcesRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, migration_service.SearchMigratableResourcesRequest):
            request = migration_service.SearchMigratableResourcesRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if parent is not None:
                request.parent = parent

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.search_migratable_resources]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("parent", request.parent),
            )),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # This method is paged; wrap the response in a pager, which provides
        # an `__iter__` convenience method.
        response = pagers.SearchMigratableResourcesPager(
            method=rpc,
            request=request,
            response=response,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def batch_migrate_resources(self,
            request: Union[migration_service.BatchMigrateResourcesRequest, dict] = None,
            *,
            parent: str = None,
            migrate_resource_requests: Sequence[migration_service.MigrateResourceRequest] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> operation.Operation:
        r"""Batch migrates resources from ml.googleapis.com,
        automl.googleapis.com, and datalabeling.googleapis.com
        to Vertex AI.

        Args:
            request (Union[google.cloud.aiplatform_v1.types.BatchMigrateResourcesRequest, dict]):
                The request object. Request message for
                [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources].
            parent (str):
                Required. The location of the migrated resource will
                live in. Format:
                ``projects/{project}/locations/{location}``

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            migrate_resource_requests (Sequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest]):
                Required. The request messages
                specifying the resources to migrate.
                They must be in the same location as the
                destination. Up to 50 resources can be
                migrated in one batch.

                This corresponds to the ``migrate_resource_requests`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation.Operation:
                An object representing a long-running operation.

                The result type for the operation will be
                :class:`google.cloud.aiplatform_v1.types.BatchMigrateResourcesResponse`
                Response message for
                [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources].

        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent, migrate_resource_requests])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a migration_service.BatchMigrateResourcesRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, migration_service.BatchMigrateResourcesRequest):
            request = migration_service.BatchMigrateResourcesRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if parent is not None:
                request.parent = parent
            if migrate_resource_requests is not None:
                request.migrate_resource_requests = migrate_resource_requests

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.batch_migrate_resources]

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("parent", request.parent),
            )),
        )

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Wrap the response in an operation future so callers can block on or
        # poll for the final BatchMigrateResourcesResponse.
        response = operation.from_gapic(
            response,
            self._transport.operations_client,
            migration_service.BatchMigrateResourcesResponse,
            metadata_type=migration_service.BatchMigrateResourcesOperationMetadata,
        )

        # Done; return the response.
        return response

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        """Releases underlying transport's resources.

        .. warning::
            ONLY use as a context manager if the transport is NOT shared
            with other clients! Exiting the with block will CLOSE the transport
            and may cause errors in other clients!
        """
        self.transport.close()


try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-aiplatform",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Fall back to an unversioned client info when the package metadata is
    # unavailable (e.g. running from source).
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


__all__ = (
    "MigrationServiceClient",
)
-# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.aiplatform_v1.types import migratable_resource -from google.cloud.aiplatform_v1.types import migration_service - - -class SearchMigratableResourcesPager: - """A pager for iterating through ``search_migratable_resources`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``migratable_resources`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``SearchMigratableResources`` requests and continue to iterate - through the ``migratable_resources`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., migration_service.SearchMigratableResourcesResponse], - request: migration_service.SearchMigratableResourcesRequest, - response: migration_service.SearchMigratableResourcesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = migration_service.SearchMigratableResourcesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[migration_service.SearchMigratableResourcesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[migratable_resource.MigratableResource]: - for page in self.pages: - yield from page.migratable_resources - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class SearchMigratableResourcesAsyncPager: - """A pager for iterating through ``search_migratable_resources`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``migratable_resources`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``SearchMigratableResources`` requests and continue to iterate - through the ``migratable_resources`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[migration_service.SearchMigratableResourcesResponse]], - request: migration_service.SearchMigratableResourcesRequest, - response: migration_service.SearchMigratableResourcesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. 
- - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.SearchMigratableResourcesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = migration_service.SearchMigratableResourcesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[migration_service.SearchMigratableResourcesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[migratable_resource.MigratableResource]: - async def async_generator(): - async for page in self.pages: - for response in page.migratable_resources: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/__init__.py deleted file mode 100644 index 8f036c410e..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import MigrationServiceTransport -from .grpc import MigrationServiceGrpcTransport -from .grpc_asyncio import MigrationServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[MigrationServiceTransport]] -_transport_registry['grpc'] = MigrationServiceGrpcTransport -_transport_registry['grpc_asyncio'] = MigrationServiceGrpcAsyncIOTransport - -__all__ = ( - 'MigrationServiceTransport', - 'MigrationServiceGrpcTransport', - 'MigrationServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/base.py deleted file mode 100644 index 5d81875cd6..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/base.py +++ /dev/null @@ -1,167 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1.types import migration_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class MigrationServiceTransport(abc.ABC): - """Abstract transport class for MigrationService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
- self._wrapped_methods = { - self.search_migratable_resources: gapic_v1.method.wrap_method( - self.search_migratable_resources, - default_timeout=None, - client_info=client_info, - ), - self.batch_migrate_resources: gapic_v1.method.wrap_method( - self.batch_migrate_resources, - default_timeout=None, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! - """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def search_migratable_resources(self) -> Callable[ - [migration_service.SearchMigratableResourcesRequest], - Union[ - migration_service.SearchMigratableResourcesResponse, - Awaitable[migration_service.SearchMigratableResourcesResponse] - ]]: - raise NotImplementedError() - - @property - def batch_migrate_resources(self) -> Callable[ - [migration_service.BatchMigrateResourcesRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'MigrationServiceTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py deleted file mode 100644 index 575d1270cb..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc.py +++ /dev/null @@ -1,305 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1.types import migration_service -from google.longrunning import operations_pb2 # type: ignore -from .base import MigrationServiceTransport, DEFAULT_CLIENT_INFO - - -class MigrationServiceGrpcTransport(MigrationServiceTransport): - """gRPC backend transport for MigrationService. - - A service that migrates resources from automl.googleapis.com, - datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. 
- ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def search_migratable_resources(self) -> Callable[ - [migration_service.SearchMigratableResourcesRequest], - migration_service.SearchMigratableResourcesResponse]: - r"""Return a callable for the search migratable resources method over gRPC. 
- - Searches all of the resources in - automl.googleapis.com, datalabeling.googleapis.com and - ml.googleapis.com that can be migrated to Vertex AI's - given location. - - Returns: - Callable[[~.SearchMigratableResourcesRequest], - ~.SearchMigratableResourcesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'search_migratable_resources' not in self._stubs: - self._stubs['search_migratable_resources'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MigrationService/SearchMigratableResources', - request_serializer=migration_service.SearchMigratableResourcesRequest.serialize, - response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize, - ) - return self._stubs['search_migratable_resources'] - - @property - def batch_migrate_resources(self) -> Callable[ - [migration_service.BatchMigrateResourcesRequest], - operations_pb2.Operation]: - r"""Return a callable for the batch migrate resources method over gRPC. - - Batch migrates resources from ml.googleapis.com, - automl.googleapis.com, and datalabeling.googleapis.com - to Vertex AI. - - Returns: - Callable[[~.BatchMigrateResourcesRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'batch_migrate_resources' not in self._stubs: - self._stubs['batch_migrate_resources'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MigrationService/BatchMigrateResources', - request_serializer=migration_service.BatchMigrateResourcesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['batch_migrate_resources'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'MigrationServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py deleted file mode 100644 index 56167de898..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/migration_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,309 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1.types import migration_service -from google.longrunning import operations_pb2 # type: ignore -from .base import MigrationServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import MigrationServiceGrpcTransport - - -class MigrationServiceGrpcAsyncIOTransport(MigrationServiceTransport): - """gRPC AsyncIO backend transport for MigrationService. - - A service that migrates resources from automl.googleapis.com, - datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. 
- google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - 
("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def search_migratable_resources(self) -> Callable[ - [migration_service.SearchMigratableResourcesRequest], - Awaitable[migration_service.SearchMigratableResourcesResponse]]: - r"""Return a callable for the search migratable resources method over gRPC. - - Searches all of the resources in - automl.googleapis.com, datalabeling.googleapis.com and - ml.googleapis.com that can be migrated to Vertex AI's - given location. - - Returns: - Callable[[~.SearchMigratableResourcesRequest], - Awaitable[~.SearchMigratableResourcesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'search_migratable_resources' not in self._stubs: - self._stubs['search_migratable_resources'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MigrationService/SearchMigratableResources', - request_serializer=migration_service.SearchMigratableResourcesRequest.serialize, - response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize, - ) - return self._stubs['search_migratable_resources'] - - @property - def batch_migrate_resources(self) -> Callable[ - [migration_service.BatchMigrateResourcesRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the batch migrate resources method over gRPC. - - Batch migrates resources from ml.googleapis.com, - automl.googleapis.com, and datalabeling.googleapis.com - to Vertex AI. - - Returns: - Callable[[~.BatchMigrateResourcesRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'batch_migrate_resources' not in self._stubs: - self._stubs['batch_migrate_resources'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.MigrationService/BatchMigrateResources', - request_serializer=migration_service.BatchMigrateResourcesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['batch_migrate_resources'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'MigrationServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/__init__.py deleted file mode 100644 index 5c4d570d15..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import ModelServiceClient -from .async_client import ModelServiceAsyncClient - -__all__ = ( - 'ModelServiceClient', - 'ModelServiceAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/async_client.py deleted file mode 100644 index 5d765836a9..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/async_client.py +++ /dev/null @@ -1,1071 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.model_service import pagers -from google.cloud.aiplatform_v1.types import deployed_model_ref -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import explanation -from google.cloud.aiplatform_v1.types import model -from google.cloud.aiplatform_v1.types import model as gca_model -from google.cloud.aiplatform_v1.types import model_evaluation -from google.cloud.aiplatform_v1.types import model_evaluation_slice -from google.cloud.aiplatform_v1.types import model_service -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import ModelServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport -from .client import ModelServiceClient - - -class ModelServiceAsyncClient: - """A service for managing Vertex AI's machine learning Models.""" - - _client: ModelServiceClient - - DEFAULT_ENDPOINT = 
ModelServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = ModelServiceClient.DEFAULT_MTLS_ENDPOINT - - endpoint_path = staticmethod(ModelServiceClient.endpoint_path) - parse_endpoint_path = staticmethod(ModelServiceClient.parse_endpoint_path) - model_path = staticmethod(ModelServiceClient.model_path) - parse_model_path = staticmethod(ModelServiceClient.parse_model_path) - model_evaluation_path = staticmethod(ModelServiceClient.model_evaluation_path) - parse_model_evaluation_path = staticmethod(ModelServiceClient.parse_model_evaluation_path) - model_evaluation_slice_path = staticmethod(ModelServiceClient.model_evaluation_slice_path) - parse_model_evaluation_slice_path = staticmethod(ModelServiceClient.parse_model_evaluation_slice_path) - training_pipeline_path = staticmethod(ModelServiceClient.training_pipeline_path) - parse_training_pipeline_path = staticmethod(ModelServiceClient.parse_training_pipeline_path) - common_billing_account_path = staticmethod(ModelServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(ModelServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(ModelServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(ModelServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(ModelServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(ModelServiceClient.parse_common_organization_path) - common_project_path = staticmethod(ModelServiceClient.common_project_path) - parse_common_project_path = staticmethod(ModelServiceClient.parse_common_project_path) - common_location_path = staticmethod(ModelServiceClient.common_location_path) - parse_common_location_path = staticmethod(ModelServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. 
- - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ModelServiceAsyncClient: The constructed client. - """ - return ModelServiceClient.from_service_account_info.__func__(ModelServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ModelServiceAsyncClient: The constructed client. - """ - return ModelServiceClient.from_service_account_file.__func__(ModelServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> ModelServiceTransport: - """Returns the transport used by the client instance. - - Returns: - ModelServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(ModelServiceClient).get_transport_class, type(ModelServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, ModelServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the model service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. 
- transport (Union[str, ~.ModelServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = ModelServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def upload_model(self, - request: Union[model_service.UploadModelRequest, dict] = None, - *, - parent: str = None, - model: gca_model.Model = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Uploads a Model artifact into Vertex AI. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UploadModelRequest, dict]): - The request object. Request message for - [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel]. 
- parent (:class:`str`): - Required. The resource name of the Location into which - to upload the Model. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - model (:class:`google.cloud.aiplatform_v1.types.Model`): - Required. The Model to create. - This corresponds to the ``model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.UploadModelResponse` - Response message of - [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel] - operation. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, model]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.UploadModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if model is not None: - request.model = model - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.upload_model, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - model_service.UploadModelResponse, - metadata_type=model_service.UploadModelOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_model(self, - request: Union[model_service.GetModelRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model.Model: - r"""Gets a Model. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetModelRequest, dict]): - The request object. Request message for - [ModelService.GetModel][google.cloud.aiplatform.v1.ModelService.GetModel]. - name (:class:`str`): - Required. The name of the Model resource. Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Model: - A trained machine learning Model. - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.GetModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_model, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_models(self, - request: Union[model_service.ListModelsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelsAsyncPager: - r"""Lists Models in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListModelsRequest, dict]): - The request object. Request message for - [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels]. - parent (:class:`str`): - Required. The resource name of the Location to list the - Models from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.model_service.pagers.ListModelsAsyncPager: - Response message for - [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.ListModelsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_models, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListModelsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def update_model(self, - request: Union[model_service.UpdateModelRequest, dict] = None, - *, - model: gca_model.Model = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_model.Model: - r"""Updates a Model. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateModelRequest, dict]): - The request object. Request message for - [ModelService.UpdateModel][google.cloud.aiplatform.v1.ModelService.UpdateModel]. - model (:class:`google.cloud.aiplatform_v1.types.Model`): - Required. The Model which replaces - the resource on the server. - - This corresponds to the ``model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The update mask applies to the resource. For - the ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Model: - A trained machine learning Model. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([model, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.UpdateModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if model is not None: - request.model = model - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_model, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("model.name", request.model.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_model(self, - request: Union[model_service.DeleteModelRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a Model. - - A model cannot be deleted if any - [Endpoint][google.cloud.aiplatform.v1.Endpoint] resource has a - [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] based - on the model in its - [deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] - field. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteModelRequest, dict]): - The request object. Request message for - [ModelService.DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel]. - name (:class:`str`): - Required. The name of the Model resource to be deleted. 
- Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.DeleteModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_model, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def export_model(self, - request: Union[model_service.ExportModelRequest, dict] = None, - *, - name: str = None, - output_config: model_service.ExportModelRequest.OutputConfig = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Exports a trained, exportable Model to a location specified by - the user. A Model is considered to be exportable if it has at - least one [supported export - format][google.cloud.aiplatform.v1.Model.supported_export_formats]. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ExportModelRequest, dict]): - The request object. Request message for - [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel]. - name (:class:`str`): - Required. The resource name of the - Model to export. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - output_config (:class:`google.cloud.aiplatform_v1.types.ExportModelRequest.OutputConfig`): - Required. The desired output location - and configuration. - - This corresponds to the ``output_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.ExportModelResponse` - Response message of - [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel] - operation. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, output_config]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.ExportModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if output_config is not None: - request.output_config = output_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.export_model, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - model_service.ExportModelResponse, - metadata_type=model_service.ExportModelOperationMetadata, - ) - - # Done; return the response. 
- return response - - async def get_model_evaluation(self, - request: Union[model_service.GetModelEvaluationRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model_evaluation.ModelEvaluation: - r"""Gets a ModelEvaluation. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetModelEvaluationRequest, dict]): - The request object. Request message for - [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1.ModelService.GetModelEvaluation]. - name (:class:`str`): - Required. The name of the ModelEvaluation resource. - Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.ModelEvaluation: - A collection of metrics calculated by - comparing Model's predictions on all of - the test data against annotations from - the test data. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.GetModelEvaluationRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_model_evaluation, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_model_evaluations(self, - request: Union[model_service.ListModelEvaluationsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelEvaluationsAsyncPager: - r"""Lists ModelEvaluations in a Model. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest, dict]): - The request object. Request message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. - parent (:class:`str`): - Required. The resource name of the Model to list the - ModelEvaluations from. Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationsAsyncPager: - Response message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.ListModelEvaluationsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_model_evaluations, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListModelEvaluationsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def get_model_evaluation_slice(self, - request: Union[model_service.GetModelEvaluationSliceRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model_evaluation_slice.ModelEvaluationSlice: - r"""Gets a ModelEvaluationSlice. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetModelEvaluationSliceRequest, dict]): - The request object. Request message for - [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1.ModelService.GetModelEvaluationSlice]. - name (:class:`str`): - Required. The name of the ModelEvaluationSlice resource. - Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.ModelEvaluationSlice: - A collection of metrics calculated by - comparing Model's predictions on a slice - of the test data against ground truth - annotations. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.GetModelEvaluationSliceRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_model_evaluation_slice, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_model_evaluation_slices(self, - request: Union[model_service.ListModelEvaluationSlicesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelEvaluationSlicesAsyncPager: - r"""Lists ModelEvaluationSlices in a ModelEvaluation. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest, dict]): - The request object. Request message for - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. - parent (:class:`str`): - Required. The resource name of the ModelEvaluation to - list the ModelEvaluationSlices from. Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationSlicesAsyncPager: - Response message for - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.ListModelEvaluationSlicesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_model_evaluation_slices, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListModelEvaluationSlicesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "ModelServiceAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/client.py deleted file mode 100644 index 393e68afeb..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/client.py +++ /dev/null @@ -1,1305 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.model_service import pagers -from google.cloud.aiplatform_v1.types import deployed_model_ref -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import explanation -from google.cloud.aiplatform_v1.types import model -from google.cloud.aiplatform_v1.types import model as gca_model -from google.cloud.aiplatform_v1.types import model_evaluation -from google.cloud.aiplatform_v1.types import model_evaluation_slice -from google.cloud.aiplatform_v1.types import model_service -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import ModelServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import ModelServiceGrpcTransport 
-from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport - - -class ModelServiceClientMeta(type): - """Metaclass for the ModelService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[ModelServiceTransport]] - _transport_registry["grpc"] = ModelServiceGrpcTransport - _transport_registry["grpc_asyncio"] = ModelServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[ModelServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class ModelServiceClient(metaclass=ModelServiceClientMeta): - """A service for managing Vertex AI's machine learning Models.""" - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
- ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ModelServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ModelServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> ModelServiceTransport: - """Returns the transport used by the client instance. - - Returns: - ModelServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def endpoint_path(project: str,location: str,endpoint: str,) -> str: - """Returns a fully-qualified endpoint string.""" - return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - - @staticmethod - def parse_endpoint_path(path: str) -> Dict[str,str]: - """Parses a endpoint path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_path(project: str,location: str,model: str,) -> str: - """Returns a fully-qualified model string.""" - return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - - @staticmethod - def parse_model_path(path: str) -> Dict[str,str]: - """Parses a model path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_evaluation_path(project: str,location: str,model: str,evaluation: str,) -> str: - """Returns a fully-qualified model_evaluation string.""" - return "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}".format(project=project, location=location, model=model, evaluation=evaluation, ) - - @staticmethod - def parse_model_evaluation_path(path: str) -> Dict[str,str]: - """Parses a model_evaluation path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)/evaluations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_evaluation_slice_path(project: str,location: str,model: str,evaluation: str,slice: str,) -> str: - """Returns a fully-qualified model_evaluation_slice string.""" - return "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}".format(project=project, 
location=location, model=model, evaluation=evaluation, slice=slice, ) - - @staticmethod - def parse_model_evaluation_slice_path(path: str) -> Dict[str,str]: - """Parses a model_evaluation_slice path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)/evaluations/(?P.+?)/slices/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def training_pipeline_path(project: str,location: str,training_pipeline: str,) -> str: - """Returns a fully-qualified training_pipeline string.""" - return "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, ) - - @staticmethod - def parse_training_pipeline_path(path: str) -> Dict[str,str]: - """Parses a training_pipeline path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/trainingPipelines/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return 
"organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, ModelServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the model service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ModelServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. 
- client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. 
- if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, ModelServiceTransport): - # transport is a ModelServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def upload_model(self, - request: Union[model_service.UploadModelRequest, dict] = None, - *, - parent: str = None, - model: gca_model.Model = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Uploads a Model artifact into Vertex AI. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UploadModelRequest, dict]): - The request object. Request message for - [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel]. - parent (str): - Required. The resource name of the Location into which - to upload the Model. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - model (google.cloud.aiplatform_v1.types.Model): - Required. The Model to create. - This corresponds to the ``model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
- - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.UploadModelResponse` - Response message of - [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel] - operation. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, model]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.UploadModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.UploadModelRequest): - request = model_service.UploadModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if model is not None: - request.model = model - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.upload_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - model_service.UploadModelResponse, - metadata_type=model_service.UploadModelOperationMetadata, - ) - - # Done; return the response. 
- return response - - def get_model(self, - request: Union[model_service.GetModelRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model.Model: - r"""Gets a Model. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetModelRequest, dict]): - The request object. Request message for - [ModelService.GetModel][google.cloud.aiplatform.v1.ModelService.GetModel]. - name (str): - Required. The name of the Model resource. Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Model: - A trained machine learning Model. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.GetModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.GetModelRequest): - request = model_service.GetModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_models(self, - request: Union[model_service.ListModelsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelsPager: - r"""Lists Models in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListModelsRequest, dict]): - The request object. Request message for - [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels]. - parent (str): - Required. The resource name of the Location to list the - Models from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.model_service.pagers.ListModelsPager: - Response message for - [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.ListModelsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.ListModelsRequest): - request = model_service.ListModelsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_models] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListModelsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_model(self, - request: Union[model_service.UpdateModelRequest, dict] = None, - *, - model: gca_model.Model = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_model.Model: - r"""Updates a Model. 
- - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateModelRequest, dict]): - The request object. Request message for - [ModelService.UpdateModel][google.cloud.aiplatform.v1.ModelService.UpdateModel]. - model (google.cloud.aiplatform_v1.types.Model): - Required. The Model which replaces - the resource on the server. - - This corresponds to the ``model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. For - the ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Model: - A trained machine learning Model. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([model, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.UpdateModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.UpdateModelRequest): - request = model_service.UpdateModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if model is not None: - request.model = model - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("model.name", request.model.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_model(self, - request: Union[model_service.DeleteModelRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a Model. - - A model cannot be deleted if any - [Endpoint][google.cloud.aiplatform.v1.Endpoint] resource has a - [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] based - on the model in its - [deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] - field. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteModelRequest, dict]): - The request object. Request message for - [ModelService.DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel]. - name (str): - Required. The name of the Model resource to be deleted. - Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.DeleteModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.DeleteModelRequest): - request = model_service.DeleteModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
- response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def export_model(self, - request: Union[model_service.ExportModelRequest, dict] = None, - *, - name: str = None, - output_config: model_service.ExportModelRequest.OutputConfig = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Exports a trained, exportable Model to a location specified by - the user. A Model is considered to be exportable if it has at - least one [supported export - format][google.cloud.aiplatform.v1.Model.supported_export_formats]. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ExportModelRequest, dict]): - The request object. Request message for - [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel]. - name (str): - Required. The resource name of the - Model to export. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - output_config (google.cloud.aiplatform_v1.types.ExportModelRequest.OutputConfig): - Required. The desired output location - and configuration. - - This corresponds to the ``output_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
- - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.ExportModelResponse` - Response message of - [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel] - operation. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, output_config]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.ExportModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.ExportModelRequest): - request = model_service.ExportModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if output_config is not None: - request.output_config = output_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.export_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - model_service.ExportModelResponse, - metadata_type=model_service.ExportModelOperationMetadata, - ) - - # Done; return the response. 
- return response - - def get_model_evaluation(self, - request: Union[model_service.GetModelEvaluationRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model_evaluation.ModelEvaluation: - r"""Gets a ModelEvaluation. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetModelEvaluationRequest, dict]): - The request object. Request message for - [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1.ModelService.GetModelEvaluation]. - name (str): - Required. The name of the ModelEvaluation resource. - Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.ModelEvaluation: - A collection of metrics calculated by - comparing Model's predictions on all of - the test data against annotations from - the test data. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.GetModelEvaluationRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, model_service.GetModelEvaluationRequest): - request = model_service.GetModelEvaluationRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_model_evaluation] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_model_evaluations(self, - request: Union[model_service.ListModelEvaluationsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelEvaluationsPager: - r"""Lists ModelEvaluations in a Model. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest, dict]): - The request object. Request message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. - parent (str): - Required. The resource name of the Model to list the - ModelEvaluations from. Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationsPager: - Response message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.ListModelEvaluationsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.ListModelEvaluationsRequest): - request = model_service.ListModelEvaluationsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_model_evaluations] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListModelEvaluationsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def get_model_evaluation_slice(self, - request: Union[model_service.GetModelEvaluationSliceRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model_evaluation_slice.ModelEvaluationSlice: - r"""Gets a ModelEvaluationSlice. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetModelEvaluationSliceRequest, dict]): - The request object. Request message for - [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1.ModelService.GetModelEvaluationSlice]. - name (str): - Required. The name of the ModelEvaluationSlice resource. - Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.ModelEvaluationSlice: - A collection of metrics calculated by - comparing Model's predictions on a slice - of the test data against ground truth - annotations. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.GetModelEvaluationSliceRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.GetModelEvaluationSliceRequest): - request = model_service.GetModelEvaluationSliceRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_model_evaluation_slice] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_model_evaluation_slices(self, - request: Union[model_service.ListModelEvaluationSlicesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelEvaluationSlicesPager: - r"""Lists ModelEvaluationSlices in a ModelEvaluation. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest, dict]): - The request object. Request message for - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. - parent (str): - Required. The resource name of the ModelEvaluation to - list the ModelEvaluationSlices from. Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationSlicesPager: - Response message for - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.ListModelEvaluationSlicesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.ListModelEvaluationSlicesRequest): - request = model_service.ListModelEvaluationSlicesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_model_evaluation_slices] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListModelEvaluationSlicesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! - """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "ModelServiceClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/pagers.py deleted file mode 100644 index 3e3f936cbb..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/pagers.py +++ /dev/null @@ -1,387 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.aiplatform_v1.types import model -from google.cloud.aiplatform_v1.types import model_evaluation -from google.cloud.aiplatform_v1.types import model_evaluation_slice -from google.cloud.aiplatform_v1.types import model_service - - -class ListModelsPager: - """A pager for iterating through ``list_models`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListModelsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``models`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListModels`` requests and continue to iterate - through the ``models`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListModelsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., model_service.ListModelsResponse], - request: model_service.ListModelsRequest, - response: model_service.ListModelsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListModelsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListModelsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = model_service.ListModelsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[model_service.ListModelsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[model.Model]: - for page in self.pages: - yield from page.models - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelsAsyncPager: - """A pager for iterating through ``list_models`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListModelsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``models`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListModels`` requests and continue to iterate - through the ``models`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListModelsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[model_service.ListModelsResponse]], - request: model_service.ListModelsRequest, - response: model_service.ListModelsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListModelsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListModelsResponse): - The initial response object. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = model_service.ListModelsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[model_service.ListModelsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[model.Model]: - async def async_generator(): - async for page in self.pages: - for response in page.models: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelEvaluationsPager: - """A pager for iterating through ``list_model_evaluations`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``model_evaluations`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListModelEvaluations`` requests and continue to iterate - through the ``model_evaluations`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., model_service.ListModelEvaluationsResponse], - request: model_service.ListModelEvaluationsRequest, - response: model_service.ListModelEvaluationsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = model_service.ListModelEvaluationsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[model_service.ListModelEvaluationsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[model_evaluation.ModelEvaluation]: - for page in self.pages: - yield from page.model_evaluations - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelEvaluationsAsyncPager: - """A pager for iterating through ``list_model_evaluations`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``model_evaluations`` field. 
- - If there are more pages, the ``__aiter__`` method will make additional - ``ListModelEvaluations`` requests and continue to iterate - through the ``model_evaluations`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[model_service.ListModelEvaluationsResponse]], - request: model_service.ListModelEvaluationsRequest, - response: model_service.ListModelEvaluationsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = model_service.ListModelEvaluationsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[model_service.ListModelEvaluationsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[model_evaluation.ModelEvaluation]: - async def async_generator(): - async for page in self.pages: - for response in page.model_evaluations: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelEvaluationSlicesPager: - """A pager for iterating through ``list_model_evaluation_slices`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``model_evaluation_slices`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListModelEvaluationSlices`` requests and continue to iterate - through the ``model_evaluation_slices`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., model_service.ListModelEvaluationSlicesResponse], - request: model_service.ListModelEvaluationSlicesRequest, - response: model_service.ListModelEvaluationSlicesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = model_service.ListModelEvaluationSlicesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[model_service.ListModelEvaluationSlicesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[model_evaluation_slice.ModelEvaluationSlice]: - for page in self.pages: - yield from page.model_evaluation_slices - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelEvaluationSlicesAsyncPager: - """A pager for iterating through ``list_model_evaluation_slices`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``model_evaluation_slices`` field. 
- - If there are more pages, the ``__aiter__`` method will make additional - ``ListModelEvaluationSlices`` requests and continue to iterate - through the ``model_evaluation_slices`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[model_service.ListModelEvaluationSlicesResponse]], - request: model_service.ListModelEvaluationSlicesRequest, - response: model_service.ListModelEvaluationSlicesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = model_service.ListModelEvaluationSlicesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[model_service.ListModelEvaluationSlicesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[model_evaluation_slice.ModelEvaluationSlice]: - async def async_generator(): - async for page in self.pages: - for response in page.model_evaluation_slices: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/__init__.py deleted file mode 100644 index 0f09224d3c..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -from typing import Dict, Type - -from .base import ModelServiceTransport -from .grpc import ModelServiceGrpcTransport -from .grpc_asyncio import ModelServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[ModelServiceTransport]] -_transport_registry['grpc'] = ModelServiceGrpcTransport -_transport_registry['grpc_asyncio'] = ModelServiceGrpcAsyncIOTransport - -__all__ = ( - 'ModelServiceTransport', - 'ModelServiceGrpcTransport', - 'ModelServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/base.py deleted file mode 100644 index 623e6b042d..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/base.py +++ /dev/null @@ -1,283 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1.types import model -from google.cloud.aiplatform_v1.types import model as gca_model -from google.cloud.aiplatform_v1.types import model_evaluation -from google.cloud.aiplatform_v1.types import model_evaluation_slice -from google.cloud.aiplatform_v1.types import model_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class ModelServiceTransport(abc.ABC): - """Abstract transport class for ModelService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. 
- if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.upload_model: gapic_v1.method.wrap_method( - self.upload_model, - default_timeout=None, - client_info=client_info, - ), - self.get_model: gapic_v1.method.wrap_method( - self.get_model, - default_timeout=None, - client_info=client_info, - ), - self.list_models: gapic_v1.method.wrap_method( - self.list_models, - default_timeout=None, - client_info=client_info, - ), - self.update_model: gapic_v1.method.wrap_method( - self.update_model, - default_timeout=None, - client_info=client_info, - ), - self.delete_model: gapic_v1.method.wrap_method( - self.delete_model, - default_timeout=None, - client_info=client_info, - ), - self.export_model: gapic_v1.method.wrap_method( - self.export_model, - default_timeout=None, - client_info=client_info, - ), - self.get_model_evaluation: gapic_v1.method.wrap_method( - self.get_model_evaluation, - default_timeout=None, - client_info=client_info, - ), - self.list_model_evaluations: gapic_v1.method.wrap_method( - self.list_model_evaluations, - default_timeout=None, - client_info=client_info, - ), - self.get_model_evaluation_slice: gapic_v1.method.wrap_method( - self.get_model_evaluation_slice, - default_timeout=None, - client_info=client_info, - ), - self.list_model_evaluation_slices: gapic_v1.method.wrap_method( - self.list_model_evaluation_slices, - default_timeout=None, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! 
- """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def upload_model(self) -> Callable[ - [model_service.UploadModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_model(self) -> Callable[ - [model_service.GetModelRequest], - Union[ - model.Model, - Awaitable[model.Model] - ]]: - raise NotImplementedError() - - @property - def list_models(self) -> Callable[ - [model_service.ListModelsRequest], - Union[ - model_service.ListModelsResponse, - Awaitable[model_service.ListModelsResponse] - ]]: - raise NotImplementedError() - - @property - def update_model(self) -> Callable[ - [model_service.UpdateModelRequest], - Union[ - gca_model.Model, - Awaitable[gca_model.Model] - ]]: - raise NotImplementedError() - - @property - def delete_model(self) -> Callable[ - [model_service.DeleteModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def export_model(self) -> Callable[ - [model_service.ExportModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_model_evaluation(self) -> Callable[ - [model_service.GetModelEvaluationRequest], - Union[ - model_evaluation.ModelEvaluation, - Awaitable[model_evaluation.ModelEvaluation] - ]]: - raise NotImplementedError() - - @property - def list_model_evaluations(self) -> Callable[ - [model_service.ListModelEvaluationsRequest], - Union[ - model_service.ListModelEvaluationsResponse, - Awaitable[model_service.ListModelEvaluationsResponse] - ]]: - raise NotImplementedError() - - @property - def get_model_evaluation_slice(self) -> Callable[ - [model_service.GetModelEvaluationSliceRequest], - Union[ - 
model_evaluation_slice.ModelEvaluationSlice, - Awaitable[model_evaluation_slice.ModelEvaluationSlice] - ]]: - raise NotImplementedError() - - @property - def list_model_evaluation_slices(self) -> Callable[ - [model_service.ListModelEvaluationSlicesRequest], - Union[ - model_service.ListModelEvaluationSlicesResponse, - Awaitable[model_service.ListModelEvaluationSlicesResponse] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'ModelServiceTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py deleted file mode 100644 index 7394745b3e..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/grpc.py +++ /dev/null @@ -1,521 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1.types import model -from google.cloud.aiplatform_v1.types import model as gca_model -from google.cloud.aiplatform_v1.types import model_evaluation -from google.cloud.aiplatform_v1.types import model_evaluation_slice -from google.cloud.aiplatform_v1.types import model_service -from google.longrunning import operations_pb2 # type: ignore -from .base import ModelServiceTransport, DEFAULT_CLIENT_INFO - - -class ModelServiceGrpcTransport(ModelServiceTransport): - """gRPC backend transport for ModelService. - - A service for managing Vertex AI's machine learning Models. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. 
- - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. 
- Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def upload_model(self) -> Callable[ - [model_service.UploadModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the upload model method over gRPC. - - Uploads a Model artifact into Vertex AI. 
- - Returns: - Callable[[~.UploadModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'upload_model' not in self._stubs: - self._stubs['upload_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/UploadModel', - request_serializer=model_service.UploadModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['upload_model'] - - @property - def get_model(self) -> Callable[ - [model_service.GetModelRequest], - model.Model]: - r"""Return a callable for the get model method over gRPC. - - Gets a Model. - - Returns: - Callable[[~.GetModelRequest], - ~.Model]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_model' not in self._stubs: - self._stubs['get_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/GetModel', - request_serializer=model_service.GetModelRequest.serialize, - response_deserializer=model.Model.deserialize, - ) - return self._stubs['get_model'] - - @property - def list_models(self) -> Callable[ - [model_service.ListModelsRequest], - model_service.ListModelsResponse]: - r"""Return a callable for the list models method over gRPC. - - Lists Models in a Location. - - Returns: - Callable[[~.ListModelsRequest], - ~.ListModelsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_models' not in self._stubs: - self._stubs['list_models'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/ListModels', - request_serializer=model_service.ListModelsRequest.serialize, - response_deserializer=model_service.ListModelsResponse.deserialize, - ) - return self._stubs['list_models'] - - @property - def update_model(self) -> Callable[ - [model_service.UpdateModelRequest], - gca_model.Model]: - r"""Return a callable for the update model method over gRPC. - - Updates a Model. - - Returns: - Callable[[~.UpdateModelRequest], - ~.Model]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_model' not in self._stubs: - self._stubs['update_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/UpdateModel', - request_serializer=model_service.UpdateModelRequest.serialize, - response_deserializer=gca_model.Model.deserialize, - ) - return self._stubs['update_model'] - - @property - def delete_model(self) -> Callable[ - [model_service.DeleteModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete model method over gRPC. - - Deletes a Model. - - A model cannot be deleted if any - [Endpoint][google.cloud.aiplatform.v1.Endpoint] resource has a - [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] based - on the model in its - [deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] - field. - - Returns: - Callable[[~.DeleteModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_model' not in self._stubs: - self._stubs['delete_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/DeleteModel', - request_serializer=model_service.DeleteModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_model'] - - @property - def export_model(self) -> Callable[ - [model_service.ExportModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the export model method over gRPC. - - Exports a trained, exportable Model to a location specified by - the user. A Model is considered to be exportable if it has at - least one [supported export - format][google.cloud.aiplatform.v1.Model.supported_export_formats]. - - Returns: - Callable[[~.ExportModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'export_model' not in self._stubs: - self._stubs['export_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/ExportModel', - request_serializer=model_service.ExportModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_model'] - - @property - def get_model_evaluation(self) -> Callable[ - [model_service.GetModelEvaluationRequest], - model_evaluation.ModelEvaluation]: - r"""Return a callable for the get model evaluation method over gRPC. - - Gets a ModelEvaluation. - - Returns: - Callable[[~.GetModelEvaluationRequest], - ~.ModelEvaluation]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_model_evaluation' not in self._stubs: - self._stubs['get_model_evaluation'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/GetModelEvaluation', - request_serializer=model_service.GetModelEvaluationRequest.serialize, - response_deserializer=model_evaluation.ModelEvaluation.deserialize, - ) - return self._stubs['get_model_evaluation'] - - @property - def list_model_evaluations(self) -> Callable[ - [model_service.ListModelEvaluationsRequest], - model_service.ListModelEvaluationsResponse]: - r"""Return a callable for the list model evaluations method over gRPC. - - Lists ModelEvaluations in a Model. - - Returns: - Callable[[~.ListModelEvaluationsRequest], - ~.ListModelEvaluationsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_model_evaluations' not in self._stubs: - self._stubs['list_model_evaluations'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/ListModelEvaluations', - request_serializer=model_service.ListModelEvaluationsRequest.serialize, - response_deserializer=model_service.ListModelEvaluationsResponse.deserialize, - ) - return self._stubs['list_model_evaluations'] - - @property - def get_model_evaluation_slice(self) -> Callable[ - [model_service.GetModelEvaluationSliceRequest], - model_evaluation_slice.ModelEvaluationSlice]: - r"""Return a callable for the get model evaluation slice method over gRPC. - - Gets a ModelEvaluationSlice. 
- - Returns: - Callable[[~.GetModelEvaluationSliceRequest], - ~.ModelEvaluationSlice]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_model_evaluation_slice' not in self._stubs: - self._stubs['get_model_evaluation_slice'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/GetModelEvaluationSlice', - request_serializer=model_service.GetModelEvaluationSliceRequest.serialize, - response_deserializer=model_evaluation_slice.ModelEvaluationSlice.deserialize, - ) - return self._stubs['get_model_evaluation_slice'] - - @property - def list_model_evaluation_slices(self) -> Callable[ - [model_service.ListModelEvaluationSlicesRequest], - model_service.ListModelEvaluationSlicesResponse]: - r"""Return a callable for the list model evaluation slices method over gRPC. - - Lists ModelEvaluationSlices in a ModelEvaluation. - - Returns: - Callable[[~.ListModelEvaluationSlicesRequest], - ~.ListModelEvaluationSlicesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_model_evaluation_slices' not in self._stubs: - self._stubs['list_model_evaluation_slices'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/ListModelEvaluationSlices', - request_serializer=model_service.ListModelEvaluationSlicesRequest.serialize, - response_deserializer=model_service.ListModelEvaluationSlicesResponse.deserialize, - ) - return self._stubs['list_model_evaluation_slices'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'ModelServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py deleted file mode 100644 index 20c16e41d7..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/model_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,525 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1.types import model -from google.cloud.aiplatform_v1.types import model as gca_model -from google.cloud.aiplatform_v1.types import model_evaluation -from google.cloud.aiplatform_v1.types import model_evaluation_slice -from google.cloud.aiplatform_v1.types import model_service -from google.longrunning import operations_pb2 # type: ignore -from .base import ModelServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import ModelServiceGrpcTransport - - -class ModelServiceGrpcAsyncIOTransport(ModelServiceTransport): - """gRPC AsyncIO backend transport for ModelService. - - A service for managing Vertex AI's machine learning Models. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. 
- always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. 
- if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def upload_model(self) -> Callable[ - [model_service.UploadModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the upload model method over gRPC. - - Uploads a Model artifact into Vertex AI. - - Returns: - Callable[[~.UploadModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'upload_model' not in self._stubs: - self._stubs['upload_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/UploadModel', - request_serializer=model_service.UploadModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['upload_model'] - - @property - def get_model(self) -> Callable[ - [model_service.GetModelRequest], - Awaitable[model.Model]]: - r"""Return a callable for the get model method over gRPC. - - Gets a Model. - - Returns: - Callable[[~.GetModelRequest], - Awaitable[~.Model]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_model' not in self._stubs: - self._stubs['get_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/GetModel', - request_serializer=model_service.GetModelRequest.serialize, - response_deserializer=model.Model.deserialize, - ) - return self._stubs['get_model'] - - @property - def list_models(self) -> Callable[ - [model_service.ListModelsRequest], - Awaitable[model_service.ListModelsResponse]]: - r"""Return a callable for the list models method over gRPC. - - Lists Models in a Location. - - Returns: - Callable[[~.ListModelsRequest], - Awaitable[~.ListModelsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_models' not in self._stubs: - self._stubs['list_models'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/ListModels', - request_serializer=model_service.ListModelsRequest.serialize, - response_deserializer=model_service.ListModelsResponse.deserialize, - ) - return self._stubs['list_models'] - - @property - def update_model(self) -> Callable[ - [model_service.UpdateModelRequest], - Awaitable[gca_model.Model]]: - r"""Return a callable for the update model method over gRPC. - - Updates a Model. - - Returns: - Callable[[~.UpdateModelRequest], - Awaitable[~.Model]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_model' not in self._stubs: - self._stubs['update_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/UpdateModel', - request_serializer=model_service.UpdateModelRequest.serialize, - response_deserializer=gca_model.Model.deserialize, - ) - return self._stubs['update_model'] - - @property - def delete_model(self) -> Callable[ - [model_service.DeleteModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete model method over gRPC. - - Deletes a Model. - - A model cannot be deleted if any - [Endpoint][google.cloud.aiplatform.v1.Endpoint] resource has a - [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] based - on the model in its - [deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] - field. - - Returns: - Callable[[~.DeleteModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_model' not in self._stubs: - self._stubs['delete_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/DeleteModel', - request_serializer=model_service.DeleteModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_model'] - - @property - def export_model(self) -> Callable[ - [model_service.ExportModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the export model method over gRPC. - - Exports a trained, exportable Model to a location specified by - the user. A Model is considered to be exportable if it has at - least one [supported export - format][google.cloud.aiplatform.v1.Model.supported_export_formats]. 
- - Returns: - Callable[[~.ExportModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'export_model' not in self._stubs: - self._stubs['export_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/ExportModel', - request_serializer=model_service.ExportModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_model'] - - @property - def get_model_evaluation(self) -> Callable[ - [model_service.GetModelEvaluationRequest], - Awaitable[model_evaluation.ModelEvaluation]]: - r"""Return a callable for the get model evaluation method over gRPC. - - Gets a ModelEvaluation. - - Returns: - Callable[[~.GetModelEvaluationRequest], - Awaitable[~.ModelEvaluation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_model_evaluation' not in self._stubs: - self._stubs['get_model_evaluation'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/GetModelEvaluation', - request_serializer=model_service.GetModelEvaluationRequest.serialize, - response_deserializer=model_evaluation.ModelEvaluation.deserialize, - ) - return self._stubs['get_model_evaluation'] - - @property - def list_model_evaluations(self) -> Callable[ - [model_service.ListModelEvaluationsRequest], - Awaitable[model_service.ListModelEvaluationsResponse]]: - r"""Return a callable for the list model evaluations method over gRPC. - - Lists ModelEvaluations in a Model. 
- - Returns: - Callable[[~.ListModelEvaluationsRequest], - Awaitable[~.ListModelEvaluationsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_model_evaluations' not in self._stubs: - self._stubs['list_model_evaluations'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/ListModelEvaluations', - request_serializer=model_service.ListModelEvaluationsRequest.serialize, - response_deserializer=model_service.ListModelEvaluationsResponse.deserialize, - ) - return self._stubs['list_model_evaluations'] - - @property - def get_model_evaluation_slice(self) -> Callable[ - [model_service.GetModelEvaluationSliceRequest], - Awaitable[model_evaluation_slice.ModelEvaluationSlice]]: - r"""Return a callable for the get model evaluation slice method over gRPC. - - Gets a ModelEvaluationSlice. - - Returns: - Callable[[~.GetModelEvaluationSliceRequest], - Awaitable[~.ModelEvaluationSlice]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_model_evaluation_slice' not in self._stubs: - self._stubs['get_model_evaluation_slice'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/GetModelEvaluationSlice', - request_serializer=model_service.GetModelEvaluationSliceRequest.serialize, - response_deserializer=model_evaluation_slice.ModelEvaluationSlice.deserialize, - ) - return self._stubs['get_model_evaluation_slice'] - - @property - def list_model_evaluation_slices(self) -> Callable[ - [model_service.ListModelEvaluationSlicesRequest], - Awaitable[model_service.ListModelEvaluationSlicesResponse]]: - r"""Return a callable for the list model evaluation slices method over gRPC. - - Lists ModelEvaluationSlices in a ModelEvaluation. - - Returns: - Callable[[~.ListModelEvaluationSlicesRequest], - Awaitable[~.ListModelEvaluationSlicesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_model_evaluation_slices' not in self._stubs: - self._stubs['list_model_evaluation_slices'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.ModelService/ListModelEvaluationSlices', - request_serializer=model_service.ListModelEvaluationSlicesRequest.serialize, - response_deserializer=model_service.ListModelEvaluationSlicesResponse.deserialize, - ) - return self._stubs['list_model_evaluation_slices'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'ModelServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/__init__.py deleted file mode 100644 index 539616023d..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import PipelineServiceClient -from .async_client import PipelineServiceAsyncClient - -__all__ = ( - 'PipelineServiceClient', - 'PipelineServiceAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py deleted file mode 100644 index 21474e18be..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/async_client.py +++ /dev/null @@ -1,1076 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.pipeline_service import pagers -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import model -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.cloud.aiplatform_v1.types import pipeline_job -from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job -from google.cloud.aiplatform_v1.types import pipeline_service -from google.cloud.aiplatform_v1.types import pipeline_state -from google.cloud.aiplatform_v1.types import training_pipeline -from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from .transports.base import PipelineServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import PipelineServiceGrpcAsyncIOTransport -from .client import PipelineServiceClient - - -class PipelineServiceAsyncClient: - """A service for creating and managing Vertex AI's pipelines. 
This - includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex AI - Pipelines). - """ - - _client: PipelineServiceClient - - DEFAULT_ENDPOINT = PipelineServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = PipelineServiceClient.DEFAULT_MTLS_ENDPOINT - - artifact_path = staticmethod(PipelineServiceClient.artifact_path) - parse_artifact_path = staticmethod(PipelineServiceClient.parse_artifact_path) - context_path = staticmethod(PipelineServiceClient.context_path) - parse_context_path = staticmethod(PipelineServiceClient.parse_context_path) - custom_job_path = staticmethod(PipelineServiceClient.custom_job_path) - parse_custom_job_path = staticmethod(PipelineServiceClient.parse_custom_job_path) - endpoint_path = staticmethod(PipelineServiceClient.endpoint_path) - parse_endpoint_path = staticmethod(PipelineServiceClient.parse_endpoint_path) - execution_path = staticmethod(PipelineServiceClient.execution_path) - parse_execution_path = staticmethod(PipelineServiceClient.parse_execution_path) - model_path = staticmethod(PipelineServiceClient.model_path) - parse_model_path = staticmethod(PipelineServiceClient.parse_model_path) - network_path = staticmethod(PipelineServiceClient.network_path) - parse_network_path = staticmethod(PipelineServiceClient.parse_network_path) - pipeline_job_path = staticmethod(PipelineServiceClient.pipeline_job_path) - parse_pipeline_job_path = staticmethod(PipelineServiceClient.parse_pipeline_job_path) - training_pipeline_path = staticmethod(PipelineServiceClient.training_pipeline_path) - parse_training_pipeline_path = staticmethod(PipelineServiceClient.parse_training_pipeline_path) - common_billing_account_path = staticmethod(PipelineServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(PipelineServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(PipelineServiceClient.common_folder_path) - 
parse_common_folder_path = staticmethod(PipelineServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(PipelineServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(PipelineServiceClient.parse_common_organization_path) - common_project_path = staticmethod(PipelineServiceClient.common_project_path) - parse_common_project_path = staticmethod(PipelineServiceClient.parse_common_project_path) - common_location_path = staticmethod(PipelineServiceClient.common_location_path) - parse_common_location_path = staticmethod(PipelineServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PipelineServiceAsyncClient: The constructed client. - """ - return PipelineServiceClient.from_service_account_info.__func__(PipelineServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PipelineServiceAsyncClient: The constructed client. - """ - return PipelineServiceClient.from_service_account_file.__func__(PipelineServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> PipelineServiceTransport: - """Returns the transport used by the client instance. 
- - Returns: - PipelineServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(PipelineServiceClient).get_transport_class, type(PipelineServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, PipelineServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the pipeline service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.PipelineServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. 
- - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = PipelineServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_training_pipeline(self, - request: Union[pipeline_service.CreateTrainingPipelineRequest, dict] = None, - *, - parent: str = None, - training_pipeline: gca_training_pipeline.TrainingPipeline = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_training_pipeline.TrainingPipeline: - r"""Creates a TrainingPipeline. A created - TrainingPipeline right away will be attempted to be run. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateTrainingPipelineRequest, dict]): - The request object. Request message for - [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CreateTrainingPipeline]. - parent (:class:`str`): - Required. The resource name of the Location to create - the TrainingPipeline in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - training_pipeline (:class:`google.cloud.aiplatform_v1.types.TrainingPipeline`): - Required. The TrainingPipeline to - create. - - This corresponds to the ``training_pipeline`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.TrainingPipeline: - The TrainingPipeline orchestrates tasks associated with training a Model. 
It - always executes the training task, and optionally may - also export data from Vertex AI's Dataset which - becomes the training input, - [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] - the Model to Vertex AI, and evaluate the Model. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, training_pipeline]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.CreateTrainingPipelineRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if training_pipeline is not None: - request.training_pipeline = training_pipeline - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_training_pipeline, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_training_pipeline(self, - request: Union[pipeline_service.GetTrainingPipelineRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> training_pipeline.TrainingPipeline: - r"""Gets a TrainingPipeline. 
- - Args: - request (Union[google.cloud.aiplatform_v1.types.GetTrainingPipelineRequest, dict]): - The request object. Request message for - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline]. - name (:class:`str`): - Required. The name of the TrainingPipeline resource. - Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.TrainingPipeline: - The TrainingPipeline orchestrates tasks associated with training a Model. It - always executes the training task, and optionally may - also export data from Vertex AI's Dataset which - becomes the training input, - [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] - the Model to Vertex AI, and evaluate the Model. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.GetTrainingPipelineRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_training_pipeline, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_training_pipelines(self, - request: Union[pipeline_service.ListTrainingPipelinesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTrainingPipelinesAsyncPager: - r"""Lists TrainingPipelines in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest, dict]): - The request object. Request message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines]. - parent (:class:`str`): - Required. The resource name of the Location to list the - TrainingPipelines from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListTrainingPipelinesAsyncPager: - Response message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.ListTrainingPipelinesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_training_pipelines, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListTrainingPipelinesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def delete_training_pipeline(self, - request: Union[pipeline_service.DeleteTrainingPipelineRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a TrainingPipeline. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteTrainingPipelineRequest, dict]): - The request object. Request message for - [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.DeleteTrainingPipeline]. - name (:class:`str`): - Required. The name of the TrainingPipeline resource to - be deleted. Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.DeleteTrainingPipelineRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_training_pipeline, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def cancel_training_pipeline(self, - request: Union[pipeline_service.CancelTrainingPipelineRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a TrainingPipeline. Starts asynchronous cancellation on - the TrainingPipeline. The server makes a best effort to cancel - the pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. 
On - successful cancellation, the TrainingPipeline is not deleted; - instead it becomes a pipeline with a - [TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state] - is set to ``CANCELLED``. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CancelTrainingPipelineRequest, dict]): - The request object. Request message for - [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CancelTrainingPipeline]. - name (:class:`str`): - Required. The name of the TrainingPipeline to cancel. - Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.CancelTrainingPipelineRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.cancel_training_pipeline, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def create_pipeline_job(self, - request: Union[pipeline_service.CreatePipelineJobRequest, dict] = None, - *, - parent: str = None, - pipeline_job: gca_pipeline_job.PipelineJob = None, - pipeline_job_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_pipeline_job.PipelineJob: - r"""Creates a PipelineJob. A PipelineJob will run - immediately when created. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreatePipelineJobRequest, dict]): - The request object. Request message for - [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1.PipelineService.CreatePipelineJob]. - parent (:class:`str`): - Required. The resource name of the Location to create - the PipelineJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - pipeline_job (:class:`google.cloud.aiplatform_v1.types.PipelineJob`): - Required. The PipelineJob to create. - This corresponds to the ``pipeline_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - pipeline_job_id (:class:`str`): - The ID to use for the PipelineJob, which will become the - final component of the PipelineJob name. If not - provided, an ID will be automatically generated. - - This value should be less than 128 characters, and valid - characters are /[a-z][0-9]-/. 
- - This corresponds to the ``pipeline_job_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.PipelineJob: - An instance of a machine learning - PipelineJob. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, pipeline_job, pipeline_job_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.CreatePipelineJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if pipeline_job is not None: - request.pipeline_job = pipeline_job - if pipeline_job_id is not None: - request.pipeline_job_id = pipeline_job_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_pipeline_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def get_pipeline_job(self, - request: Union[pipeline_service.GetPipelineJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pipeline_job.PipelineJob: - r"""Gets a PipelineJob. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetPipelineJobRequest, dict]): - The request object. Request message for - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob]. - name (:class:`str`): - Required. The name of the PipelineJob resource. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.PipelineJob: - An instance of a machine learning - PipelineJob. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.GetPipelineJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_pipeline_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_pipeline_jobs(self, - request: Union[pipeline_service.ListPipelineJobsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListPipelineJobsAsyncPager: - r"""Lists PipelineJobs in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListPipelineJobsRequest, dict]): - The request object. Request message for - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs]. - parent (:class:`str`): - Required. The resource name of the Location to list the - PipelineJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListPipelineJobsAsyncPager: - Response message for - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. 
- - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.ListPipelineJobsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_pipeline_jobs, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListPipelineJobsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_pipeline_job(self, - request: Union[pipeline_service.DeletePipelineJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a PipelineJob. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeletePipelineJobRequest, dict]): - The request object. 
Request message for - [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1.PipelineService.DeletePipelineJob]. - name (:class:`str`): - Required. The name of the PipelineJob resource to be - deleted. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.DeletePipelineJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_pipeline_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def cancel_pipeline_job(self, - request: Union[pipeline_service.CancelPipelineJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a PipelineJob. Starts asynchronous cancellation on the - PipelineJob. The server makes a best effort to cancel the - pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. On - successful cancellation, the PipelineJob is not deleted; instead - it becomes a pipeline with a - [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] - is set to ``CANCELLED``. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CancelPipelineJobRequest, dict]): - The request object. 
Request message for - [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1.PipelineService.CancelPipelineJob]. - name (:class:`str`): - Required. The name of the PipelineJob to cancel. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.CancelPipelineJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.cancel_pipeline_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "PipelineServiceAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/client.py deleted file mode 100644 index 266dd70c68..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/client.py +++ /dev/null @@ -1,1346 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.pipeline_service import pagers -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import model -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.cloud.aiplatform_v1.types import pipeline_job -from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job -from google.cloud.aiplatform_v1.types import pipeline_service -from google.cloud.aiplatform_v1.types import pipeline_state -from google.cloud.aiplatform_v1.types import training_pipeline -from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from .transports.base import PipelineServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc 
import PipelineServiceGrpcTransport -from .transports.grpc_asyncio import PipelineServiceGrpcAsyncIOTransport - - -class PipelineServiceClientMeta(type): - """Metaclass for the PipelineService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[PipelineServiceTransport]] - _transport_registry["grpc"] = PipelineServiceGrpcTransport - _transport_registry["grpc_asyncio"] = PipelineServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[PipelineServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class PipelineServiceClient(metaclass=PipelineServiceClientMeta): - """A service for creating and managing Vertex AI's pipelines. This - includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex AI - Pipelines). - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
- ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PipelineServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PipelineServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> PipelineServiceTransport: - """Returns the transport used by the client instance. - - Returns: - PipelineServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def artifact_path(project: str,location: str,metadata_store: str,artifact: str,) -> str: - """Returns a fully-qualified artifact string.""" - return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, ) - - @staticmethod - def parse_artifact_path(path: str) -> Dict[str,str]: - """Parses a artifact path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/artifacts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def context_path(project: str,location: str,metadata_store: str,context: str,) -> str: - """Returns a fully-qualified context string.""" - return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, ) - - @staticmethod - def parse_context_path(path: str) -> Dict[str,str]: - """Parses a context path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/contexts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def custom_job_path(project: str,location: str,custom_job: str,) -> str: - """Returns a fully-qualified custom_job string.""" - return "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) - - @staticmethod - def parse_custom_job_path(path: str) -> Dict[str,str]: - """Parses a custom_job path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/customJobs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def endpoint_path(project: str,location: str,endpoint: str,) -> str: - """Returns a fully-qualified endpoint string.""" - return 
"projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - - @staticmethod - def parse_endpoint_path(path: str) -> Dict[str,str]: - """Parses a endpoint path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def execution_path(project: str,location: str,metadata_store: str,execution: str,) -> str: - """Returns a fully-qualified execution string.""" - return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, location=location, metadata_store=metadata_store, execution=execution, ) - - @staticmethod - def parse_execution_path(path: str) -> Dict[str,str]: - """Parses a execution path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/executions/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_path(project: str,location: str,model: str,) -> str: - """Returns a fully-qualified model string.""" - return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - - @staticmethod - def parse_model_path(path: str) -> Dict[str,str]: - """Parses a model path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def network_path(project: str,network: str,) -> str: - """Returns a fully-qualified network string.""" - return "projects/{project}/global/networks/{network}".format(project=project, network=network, ) - - @staticmethod - def parse_network_path(path: str) -> Dict[str,str]: - """Parses a network path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/global/networks/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def 
pipeline_job_path(project: str,location: str,pipeline_job: str,) -> str: - """Returns a fully-qualified pipeline_job string.""" - return "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format(project=project, location=location, pipeline_job=pipeline_job, ) - - @staticmethod - def parse_pipeline_job_path(path: str) -> Dict[str,str]: - """Parses a pipeline_job path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/pipelineJobs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def training_pipeline_path(project: str,location: str,training_pipeline: str,) -> str: - """Returns a fully-qualified training_pipeline string.""" - return "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, ) - - @staticmethod - def parse_training_pipeline_path(path: str) -> Dict[str,str]: - """Parses a training_pipeline path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/trainingPipelines/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else 
{} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, PipelineServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the pipeline service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. 
- transport (Union[str, PipelineServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. 
- if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, PipelineServiceTransport): - # transport is a PipelineServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def create_training_pipeline(self, - request: Union[pipeline_service.CreateTrainingPipelineRequest, dict] = None, - *, - parent: str = None, - training_pipeline: gca_training_pipeline.TrainingPipeline = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_training_pipeline.TrainingPipeline: - r"""Creates a TrainingPipeline. A created - TrainingPipeline right away will be attempted to be run. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateTrainingPipelineRequest, dict]): - The request object. Request message for - [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CreateTrainingPipeline]. - parent (str): - Required. The resource name of the Location to create - the TrainingPipeline in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - training_pipeline (google.cloud.aiplatform_v1.types.TrainingPipeline): - Required. The TrainingPipeline to - create. - - This corresponds to the ``training_pipeline`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1.types.TrainingPipeline: - The TrainingPipeline orchestrates tasks associated with training a Model. It - always executes the training task, and optionally may - also export data from Vertex AI's Dataset which - becomes the training input, - [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] - the Model to Vertex AI, and evaluate the Model. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, training_pipeline]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.CreateTrainingPipelineRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.CreateTrainingPipelineRequest): - request = pipeline_service.CreateTrainingPipelineRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if training_pipeline is not None: - request.training_pipeline = training_pipeline - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_training_pipeline] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def get_training_pipeline(self, - request: Union[pipeline_service.GetTrainingPipelineRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> training_pipeline.TrainingPipeline: - r"""Gets a TrainingPipeline. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetTrainingPipelineRequest, dict]): - The request object. Request message for - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline]. - name (str): - Required. The name of the TrainingPipeline resource. - Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.TrainingPipeline: - The TrainingPipeline orchestrates tasks associated with training a Model. It - always executes the training task, and optionally may - also export data from Vertex AI's Dataset which - becomes the training input, - [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] - the Model to Vertex AI, and evaluate the Model. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.GetTrainingPipelineRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.GetTrainingPipelineRequest): - request = pipeline_service.GetTrainingPipelineRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_training_pipeline] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_training_pipelines(self, - request: Union[pipeline_service.ListTrainingPipelinesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTrainingPipelinesPager: - r"""Lists TrainingPipelines in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest, dict]): - The request object. Request message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines]. - parent (str): - Required. The resource name of the Location to list the - TrainingPipelines from. 
Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListTrainingPipelinesPager: - Response message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.ListTrainingPipelinesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.ListTrainingPipelinesRequest): - request = pipeline_service.ListTrainingPipelinesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_training_pipelines] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListTrainingPipelinesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_training_pipeline(self, - request: Union[pipeline_service.DeleteTrainingPipelineRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a TrainingPipeline. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteTrainingPipelineRequest, dict]): - The request object. Request message for - [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.DeleteTrainingPipeline]. - name (str): - Required. The name of the TrainingPipeline resource to - be deleted. Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. 
A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.DeleteTrainingPipelineRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.DeleteTrainingPipelineRequest): - request = pipeline_service.DeleteTrainingPipelineRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_training_pipeline] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
- return response - - def cancel_training_pipeline(self, - request: Union[pipeline_service.CancelTrainingPipelineRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a TrainingPipeline. Starts asynchronous cancellation on - the TrainingPipeline. The server makes a best effort to cancel - the pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. On - successful cancellation, the TrainingPipeline is not deleted; - instead it becomes a pipeline with a - [TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state] - is set to ``CANCELLED``. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CancelTrainingPipelineRequest, dict]): - The request object. Request message for - [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CancelTrainingPipeline]. - name (str): - Required. The name of the TrainingPipeline to cancel. - Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.CancelTrainingPipelineRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.CancelTrainingPipelineRequest): - request = pipeline_service.CancelTrainingPipelineRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.cancel_training_pipeline] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def create_pipeline_job(self, - request: Union[pipeline_service.CreatePipelineJobRequest, dict] = None, - *, - parent: str = None, - pipeline_job: gca_pipeline_job.PipelineJob = None, - pipeline_job_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_pipeline_job.PipelineJob: - r"""Creates a PipelineJob. A PipelineJob will run - immediately when created. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreatePipelineJobRequest, dict]): - The request object. 
Request message for - [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1.PipelineService.CreatePipelineJob]. - parent (str): - Required. The resource name of the Location to create - the PipelineJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - pipeline_job (google.cloud.aiplatform_v1.types.PipelineJob): - Required. The PipelineJob to create. - This corresponds to the ``pipeline_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - pipeline_job_id (str): - The ID to use for the PipelineJob, which will become the - final component of the PipelineJob name. If not - provided, an ID will be automatically generated. - - This value should be less than 128 characters, and valid - characters are /[a-z][0-9]-/. - - This corresponds to the ``pipeline_job_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.PipelineJob: - An instance of a machine learning - PipelineJob. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, pipeline_job, pipeline_job_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.CreatePipelineJobRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.CreatePipelineJobRequest): - request = pipeline_service.CreatePipelineJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if pipeline_job is not None: - request.pipeline_job = pipeline_job - if pipeline_job_id is not None: - request.pipeline_job_id = pipeline_job_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_pipeline_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_pipeline_job(self, - request: Union[pipeline_service.GetPipelineJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pipeline_job.PipelineJob: - r"""Gets a PipelineJob. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetPipelineJobRequest, dict]): - The request object. Request message for - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob]. - name (str): - Required. The name of the PipelineJob resource. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.PipelineJob: - An instance of a machine learning - PipelineJob. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.GetPipelineJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.GetPipelineJobRequest): - request = pipeline_service.GetPipelineJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_pipeline_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def list_pipeline_jobs(self, - request: Union[pipeline_service.ListPipelineJobsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListPipelineJobsPager: - r"""Lists PipelineJobs in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListPipelineJobsRequest, dict]): - The request object. Request message for - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs]. - parent (str): - Required. The resource name of the Location to list the - PipelineJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.pipeline_service.pagers.ListPipelineJobsPager: - Response message for - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.ListPipelineJobsRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.ListPipelineJobsRequest): - request = pipeline_service.ListPipelineJobsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_pipeline_jobs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListPipelineJobsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_pipeline_job(self, - request: Union[pipeline_service.DeletePipelineJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a PipelineJob. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeletePipelineJobRequest, dict]): - The request object. Request message for - [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1.PipelineService.DeletePipelineJob]. - name (str): - Required. The name of the PipelineJob resource to be - deleted. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.DeletePipelineJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.DeletePipelineJobRequest): - request = pipeline_service.DeletePipelineJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_pipeline_job] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def cancel_pipeline_job(self, - request: Union[pipeline_service.CancelPipelineJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a PipelineJob. Starts asynchronous cancellation on the - PipelineJob. The server makes a best effort to cancel the - pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. On - successful cancellation, the PipelineJob is not deleted; instead - it becomes a pipeline with a - [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] - is set to ``CANCELLED``. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CancelPipelineJobRequest, dict]): - The request object. Request message for - [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1.PipelineService.CancelPipelineJob]. - name (str): - Required. The name of the PipelineJob to cancel. 
Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.CancelPipelineJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.CancelPipelineJobRequest): - request = pipeline_service.CancelPipelineJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.cancel_pipeline_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. 
warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! - """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "PipelineServiceClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py deleted file mode 100644 index c9e50d2341..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/pagers.py +++ /dev/null @@ -1,264 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.aiplatform_v1.types import pipeline_job -from google.cloud.aiplatform_v1.types import pipeline_service -from google.cloud.aiplatform_v1.types import training_pipeline - - -class ListTrainingPipelinesPager: - """A pager for iterating through ``list_training_pipelines`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``training_pipelines`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListTrainingPipelines`` requests and continue to iterate - through the ``training_pipelines`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., pipeline_service.ListTrainingPipelinesResponse], - request: pipeline_service.ListTrainingPipelinesRequest, - response: pipeline_service.ListTrainingPipelinesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = pipeline_service.ListTrainingPipelinesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[pipeline_service.ListTrainingPipelinesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[training_pipeline.TrainingPipeline]: - for page in self.pages: - yield from page.training_pipelines - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTrainingPipelinesAsyncPager: - """A pager for iterating through ``list_training_pipelines`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``training_pipelines`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListTrainingPipelines`` requests and continue to iterate - through the ``training_pipelines`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[pipeline_service.ListTrainingPipelinesResponse]], - request: pipeline_service.ListTrainingPipelinesRequest, - response: pipeline_service.ListTrainingPipelinesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. 
- request (google.cloud.aiplatform_v1.types.ListTrainingPipelinesRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListTrainingPipelinesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = pipeline_service.ListTrainingPipelinesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[pipeline_service.ListTrainingPipelinesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[training_pipeline.TrainingPipeline]: - async def async_generator(): - async for page in self.pages: - for response in page.training_pipelines: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListPipelineJobsPager: - """A pager for iterating through ``list_pipeline_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListPipelineJobsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``pipeline_jobs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListPipelineJobs`` requests and continue to iterate - through the ``pipeline_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListPipelineJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., pipeline_service.ListPipelineJobsResponse], - request: pipeline_service.ListPipelineJobsRequest, - response: pipeline_service.ListPipelineJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListPipelineJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListPipelineJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = pipeline_service.ListPipelineJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[pipeline_service.ListPipelineJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[pipeline_job.PipelineJob]: - for page in self.pages: - yield from page.pipeline_jobs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListPipelineJobsAsyncPager: - """A pager for iterating through ``list_pipeline_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListPipelineJobsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``pipeline_jobs`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListPipelineJobs`` requests and continue to iterate - through the ``pipeline_jobs`` field on the - corresponding responses. 
- - All the usual :class:`google.cloud.aiplatform_v1.types.ListPipelineJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[pipeline_service.ListPipelineJobsResponse]], - request: pipeline_service.ListPipelineJobsRequest, - response: pipeline_service.ListPipelineJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListPipelineJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListPipelineJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = pipeline_service.ListPipelineJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[pipeline_service.ListPipelineJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[pipeline_job.PipelineJob]: - async def async_generator(): - async for page in self.pages: - for response in page.pipeline_jobs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/__init__.py 
b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/__init__.py deleted file mode 100644 index 77051d8254..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import PipelineServiceTransport -from .grpc import PipelineServiceGrpcTransport -from .grpc_asyncio import PipelineServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. 
-_transport_registry = OrderedDict() # type: Dict[str, Type[PipelineServiceTransport]] -_transport_registry['grpc'] = PipelineServiceGrpcTransport -_transport_registry['grpc_asyncio'] = PipelineServiceGrpcAsyncIOTransport - -__all__ = ( - 'PipelineServiceTransport', - 'PipelineServiceGrpcTransport', - 'PipelineServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py deleted file mode 100644 index bd6aa5caec..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/base.py +++ /dev/null @@ -1,284 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1.types import pipeline_job -from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job -from google.cloud.aiplatform_v1.types import pipeline_service -from google.cloud.aiplatform_v1.types import training_pipeline -from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class PipelineServiceTransport(abc.ABC): - """Abstract transport class for PipelineService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. 
- credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. 
- if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.create_training_pipeline: gapic_v1.method.wrap_method( - self.create_training_pipeline, - default_timeout=None, - client_info=client_info, - ), - self.get_training_pipeline: gapic_v1.method.wrap_method( - self.get_training_pipeline, - default_timeout=None, - client_info=client_info, - ), - self.list_training_pipelines: gapic_v1.method.wrap_method( - self.list_training_pipelines, - default_timeout=None, - client_info=client_info, - ), - self.delete_training_pipeline: gapic_v1.method.wrap_method( - self.delete_training_pipeline, - default_timeout=None, - client_info=client_info, - ), - self.cancel_training_pipeline: gapic_v1.method.wrap_method( - self.cancel_training_pipeline, - default_timeout=None, - client_info=client_info, - ), - self.create_pipeline_job: gapic_v1.method.wrap_method( - self.create_pipeline_job, - default_timeout=None, - client_info=client_info, - ), - self.get_pipeline_job: gapic_v1.method.wrap_method( - self.get_pipeline_job, - default_timeout=None, - client_info=client_info, - ), - self.list_pipeline_jobs: gapic_v1.method.wrap_method( - self.list_pipeline_jobs, - default_timeout=None, - client_info=client_info, - ), - self.delete_pipeline_job: gapic_v1.method.wrap_method( - self.delete_pipeline_job, - default_timeout=None, - client_info=client_info, - ), - self.cancel_pipeline_job: gapic_v1.method.wrap_method( - self.cancel_pipeline_job, - default_timeout=None, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. 
warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! - """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_training_pipeline(self) -> Callable[ - [pipeline_service.CreateTrainingPipelineRequest], - Union[ - gca_training_pipeline.TrainingPipeline, - Awaitable[gca_training_pipeline.TrainingPipeline] - ]]: - raise NotImplementedError() - - @property - def get_training_pipeline(self) -> Callable[ - [pipeline_service.GetTrainingPipelineRequest], - Union[ - training_pipeline.TrainingPipeline, - Awaitable[training_pipeline.TrainingPipeline] - ]]: - raise NotImplementedError() - - @property - def list_training_pipelines(self) -> Callable[ - [pipeline_service.ListTrainingPipelinesRequest], - Union[ - pipeline_service.ListTrainingPipelinesResponse, - Awaitable[pipeline_service.ListTrainingPipelinesResponse] - ]]: - raise NotImplementedError() - - @property - def delete_training_pipeline(self) -> Callable[ - [pipeline_service.DeleteTrainingPipelineRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def cancel_training_pipeline(self) -> Callable[ - [pipeline_service.CancelTrainingPipelineRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def create_pipeline_job(self) -> Callable[ - [pipeline_service.CreatePipelineJobRequest], - Union[ - gca_pipeline_job.PipelineJob, - Awaitable[gca_pipeline_job.PipelineJob] - ]]: - raise NotImplementedError() - - @property - def get_pipeline_job(self) -> Callable[ - [pipeline_service.GetPipelineJobRequest], - Union[ - pipeline_job.PipelineJob, - Awaitable[pipeline_job.PipelineJob] - ]]: - raise NotImplementedError() - - @property - def list_pipeline_jobs(self) -> 
Callable[ - [pipeline_service.ListPipelineJobsRequest], - Union[ - pipeline_service.ListPipelineJobsResponse, - Awaitable[pipeline_service.ListPipelineJobsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_pipeline_job(self) -> Callable[ - [pipeline_service.DeletePipelineJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def cancel_pipeline_job(self) -> Callable[ - [pipeline_service.CancelPipelineJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'PipelineServiceTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py deleted file mode 100644 index 482f1c0544..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc.py +++ /dev/null @@ -1,541 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1.types import pipeline_job -from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job -from google.cloud.aiplatform_v1.types import pipeline_service -from google.cloud.aiplatform_v1.types import training_pipeline -from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import PipelineServiceTransport, DEFAULT_CLIENT_INFO - - -class PipelineServiceGrpcTransport(PipelineServiceTransport): - """gRPC backend transport for PipelineService. - - A service for creating and managing Vertex AI's pipelines. This - includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex AI - Pipelines). - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. 
- ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_training_pipeline(self) -> Callable[ - [pipeline_service.CreateTrainingPipelineRequest], - gca_training_pipeline.TrainingPipeline]: - r"""Return a callable for the create training pipeline method over gRPC. - - Creates a TrainingPipeline. A created - TrainingPipeline right away will be attempted to be run. 
- - Returns: - Callable[[~.CreateTrainingPipelineRequest], - ~.TrainingPipeline]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_training_pipeline' not in self._stubs: - self._stubs['create_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/CreateTrainingPipeline', - request_serializer=pipeline_service.CreateTrainingPipelineRequest.serialize, - response_deserializer=gca_training_pipeline.TrainingPipeline.deserialize, - ) - return self._stubs['create_training_pipeline'] - - @property - def get_training_pipeline(self) -> Callable[ - [pipeline_service.GetTrainingPipelineRequest], - training_pipeline.TrainingPipeline]: - r"""Return a callable for the get training pipeline method over gRPC. - - Gets a TrainingPipeline. - - Returns: - Callable[[~.GetTrainingPipelineRequest], - ~.TrainingPipeline]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_training_pipeline' not in self._stubs: - self._stubs['get_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/GetTrainingPipeline', - request_serializer=pipeline_service.GetTrainingPipelineRequest.serialize, - response_deserializer=training_pipeline.TrainingPipeline.deserialize, - ) - return self._stubs['get_training_pipeline'] - - @property - def list_training_pipelines(self) -> Callable[ - [pipeline_service.ListTrainingPipelinesRequest], - pipeline_service.ListTrainingPipelinesResponse]: - r"""Return a callable for the list training pipelines method over gRPC. 
- - Lists TrainingPipelines in a Location. - - Returns: - Callable[[~.ListTrainingPipelinesRequest], - ~.ListTrainingPipelinesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_training_pipelines' not in self._stubs: - self._stubs['list_training_pipelines'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/ListTrainingPipelines', - request_serializer=pipeline_service.ListTrainingPipelinesRequest.serialize, - response_deserializer=pipeline_service.ListTrainingPipelinesResponse.deserialize, - ) - return self._stubs['list_training_pipelines'] - - @property - def delete_training_pipeline(self) -> Callable[ - [pipeline_service.DeleteTrainingPipelineRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete training pipeline method over gRPC. - - Deletes a TrainingPipeline. - - Returns: - Callable[[~.DeleteTrainingPipelineRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_training_pipeline' not in self._stubs: - self._stubs['delete_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/DeleteTrainingPipeline', - request_serializer=pipeline_service.DeleteTrainingPipelineRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_training_pipeline'] - - @property - def cancel_training_pipeline(self) -> Callable[ - [pipeline_service.CancelTrainingPipelineRequest], - empty_pb2.Empty]: - r"""Return a callable for the cancel training pipeline method over gRPC. - - Cancels a TrainingPipeline. Starts asynchronous cancellation on - the TrainingPipeline. The server makes a best effort to cancel - the pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. On - successful cancellation, the TrainingPipeline is not deleted; - instead it becomes a pipeline with a - [TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state] - is set to ``CANCELLED``. - - Returns: - Callable[[~.CancelTrainingPipelineRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'cancel_training_pipeline' not in self._stubs: - self._stubs['cancel_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/CancelTrainingPipeline', - request_serializer=pipeline_service.CancelTrainingPipelineRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_training_pipeline'] - - @property - def create_pipeline_job(self) -> Callable[ - [pipeline_service.CreatePipelineJobRequest], - gca_pipeline_job.PipelineJob]: - r"""Return a callable for the create pipeline job method over gRPC. - - Creates a PipelineJob. A PipelineJob will run - immediately when created. - - Returns: - Callable[[~.CreatePipelineJobRequest], - ~.PipelineJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_pipeline_job' not in self._stubs: - self._stubs['create_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/CreatePipelineJob', - request_serializer=pipeline_service.CreatePipelineJobRequest.serialize, - response_deserializer=gca_pipeline_job.PipelineJob.deserialize, - ) - return self._stubs['create_pipeline_job'] - - @property - def get_pipeline_job(self) -> Callable[ - [pipeline_service.GetPipelineJobRequest], - pipeline_job.PipelineJob]: - r"""Return a callable for the get pipeline job method over gRPC. - - Gets a PipelineJob. - - Returns: - Callable[[~.GetPipelineJobRequest], - ~.PipelineJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_pipeline_job' not in self._stubs: - self._stubs['get_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/GetPipelineJob', - request_serializer=pipeline_service.GetPipelineJobRequest.serialize, - response_deserializer=pipeline_job.PipelineJob.deserialize, - ) - return self._stubs['get_pipeline_job'] - - @property - def list_pipeline_jobs(self) -> Callable[ - [pipeline_service.ListPipelineJobsRequest], - pipeline_service.ListPipelineJobsResponse]: - r"""Return a callable for the list pipeline jobs method over gRPC. - - Lists PipelineJobs in a Location. - - Returns: - Callable[[~.ListPipelineJobsRequest], - ~.ListPipelineJobsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_pipeline_jobs' not in self._stubs: - self._stubs['list_pipeline_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/ListPipelineJobs', - request_serializer=pipeline_service.ListPipelineJobsRequest.serialize, - response_deserializer=pipeline_service.ListPipelineJobsResponse.deserialize, - ) - return self._stubs['list_pipeline_jobs'] - - @property - def delete_pipeline_job(self) -> Callable[ - [pipeline_service.DeletePipelineJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete pipeline job method over gRPC. - - Deletes a PipelineJob. - - Returns: - Callable[[~.DeletePipelineJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_pipeline_job' not in self._stubs: - self._stubs['delete_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/DeletePipelineJob', - request_serializer=pipeline_service.DeletePipelineJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_pipeline_job'] - - @property - def cancel_pipeline_job(self) -> Callable[ - [pipeline_service.CancelPipelineJobRequest], - empty_pb2.Empty]: - r"""Return a callable for the cancel pipeline job method over gRPC. - - Cancels a PipelineJob. Starts asynchronous cancellation on the - PipelineJob. The server makes a best effort to cancel the - pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. On - successful cancellation, the PipelineJob is not deleted; instead - it becomes a pipeline with a - [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] - is set to ``CANCELLED``. - - Returns: - Callable[[~.CancelPipelineJobRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'cancel_pipeline_job' not in self._stubs: - self._stubs['cancel_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/CancelPipelineJob', - request_serializer=pipeline_service.CancelPipelineJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_pipeline_job'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'PipelineServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py deleted file mode 100644 index 5373453ffa..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/pipeline_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,545 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1.types import pipeline_job -from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job -from google.cloud.aiplatform_v1.types import pipeline_service -from google.cloud.aiplatform_v1.types import training_pipeline -from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import PipelineServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import PipelineServiceGrpcTransport - - -class PipelineServiceGrpcAsyncIOTransport(PipelineServiceTransport): - """gRPC AsyncIO backend transport for PipelineService. - - A service for creating and managing Vertex AI's pipelines. This - includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex AI - Pipelines). - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. 
- """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
- If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. 
This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_training_pipeline(self) -> Callable[ - [pipeline_service.CreateTrainingPipelineRequest], - Awaitable[gca_training_pipeline.TrainingPipeline]]: - r"""Return a callable for the create training pipeline method over gRPC. - - Creates a TrainingPipeline. A created - TrainingPipeline right away will be attempted to be run. - - Returns: - Callable[[~.CreateTrainingPipelineRequest], - Awaitable[~.TrainingPipeline]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_training_pipeline' not in self._stubs: - self._stubs['create_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/CreateTrainingPipeline', - request_serializer=pipeline_service.CreateTrainingPipelineRequest.serialize, - response_deserializer=gca_training_pipeline.TrainingPipeline.deserialize, - ) - return self._stubs['create_training_pipeline'] - - @property - def get_training_pipeline(self) -> Callable[ - [pipeline_service.GetTrainingPipelineRequest], - Awaitable[training_pipeline.TrainingPipeline]]: - r"""Return a callable for the get training pipeline method over gRPC. - - Gets a TrainingPipeline. - - Returns: - Callable[[~.GetTrainingPipelineRequest], - Awaitable[~.TrainingPipeline]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_training_pipeline' not in self._stubs: - self._stubs['get_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/GetTrainingPipeline', - request_serializer=pipeline_service.GetTrainingPipelineRequest.serialize, - response_deserializer=training_pipeline.TrainingPipeline.deserialize, - ) - return self._stubs['get_training_pipeline'] - - @property - def list_training_pipelines(self) -> Callable[ - [pipeline_service.ListTrainingPipelinesRequest], - Awaitable[pipeline_service.ListTrainingPipelinesResponse]]: - r"""Return a callable for the list training pipelines method over gRPC. - - Lists TrainingPipelines in a Location. - - Returns: - Callable[[~.ListTrainingPipelinesRequest], - Awaitable[~.ListTrainingPipelinesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_training_pipelines' not in self._stubs: - self._stubs['list_training_pipelines'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/ListTrainingPipelines', - request_serializer=pipeline_service.ListTrainingPipelinesRequest.serialize, - response_deserializer=pipeline_service.ListTrainingPipelinesResponse.deserialize, - ) - return self._stubs['list_training_pipelines'] - - @property - def delete_training_pipeline(self) -> Callable[ - [pipeline_service.DeleteTrainingPipelineRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete training pipeline method over gRPC. - - Deletes a TrainingPipeline. - - Returns: - Callable[[~.DeleteTrainingPipelineRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_training_pipeline' not in self._stubs: - self._stubs['delete_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/DeleteTrainingPipeline', - request_serializer=pipeline_service.DeleteTrainingPipelineRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_training_pipeline'] - - @property - def cancel_training_pipeline(self) -> Callable[ - [pipeline_service.CancelTrainingPipelineRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the cancel training pipeline method over gRPC. - - Cancels a TrainingPipeline. Starts asynchronous cancellation on - the TrainingPipeline. The server makes a best effort to cancel - the pipeline, but success is not guaranteed. 
Clients can use - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. On - successful cancellation, the TrainingPipeline is not deleted; - instead it becomes a pipeline with a - [TrainingPipeline.error][google.cloud.aiplatform.v1.TrainingPipeline.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [TrainingPipeline.state][google.cloud.aiplatform.v1.TrainingPipeline.state] - is set to ``CANCELLED``. - - Returns: - Callable[[~.CancelTrainingPipelineRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_training_pipeline' not in self._stubs: - self._stubs['cancel_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/CancelTrainingPipeline', - request_serializer=pipeline_service.CancelTrainingPipelineRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_training_pipeline'] - - @property - def create_pipeline_job(self) -> Callable[ - [pipeline_service.CreatePipelineJobRequest], - Awaitable[gca_pipeline_job.PipelineJob]]: - r"""Return a callable for the create pipeline job method over gRPC. - - Creates a PipelineJob. A PipelineJob will run - immediately when created. - - Returns: - Callable[[~.CreatePipelineJobRequest], - Awaitable[~.PipelineJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_pipeline_job' not in self._stubs: - self._stubs['create_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/CreatePipelineJob', - request_serializer=pipeline_service.CreatePipelineJobRequest.serialize, - response_deserializer=gca_pipeline_job.PipelineJob.deserialize, - ) - return self._stubs['create_pipeline_job'] - - @property - def get_pipeline_job(self) -> Callable[ - [pipeline_service.GetPipelineJobRequest], - Awaitable[pipeline_job.PipelineJob]]: - r"""Return a callable for the get pipeline job method over gRPC. - - Gets a PipelineJob. - - Returns: - Callable[[~.GetPipelineJobRequest], - Awaitable[~.PipelineJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_pipeline_job' not in self._stubs: - self._stubs['get_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/GetPipelineJob', - request_serializer=pipeline_service.GetPipelineJobRequest.serialize, - response_deserializer=pipeline_job.PipelineJob.deserialize, - ) - return self._stubs['get_pipeline_job'] - - @property - def list_pipeline_jobs(self) -> Callable[ - [pipeline_service.ListPipelineJobsRequest], - Awaitable[pipeline_service.ListPipelineJobsResponse]]: - r"""Return a callable for the list pipeline jobs method over gRPC. - - Lists PipelineJobs in a Location. - - Returns: - Callable[[~.ListPipelineJobsRequest], - Awaitable[~.ListPipelineJobsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_pipeline_jobs' not in self._stubs: - self._stubs['list_pipeline_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/ListPipelineJobs', - request_serializer=pipeline_service.ListPipelineJobsRequest.serialize, - response_deserializer=pipeline_service.ListPipelineJobsResponse.deserialize, - ) - return self._stubs['list_pipeline_jobs'] - - @property - def delete_pipeline_job(self) -> Callable[ - [pipeline_service.DeletePipelineJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete pipeline job method over gRPC. - - Deletes a PipelineJob. - - Returns: - Callable[[~.DeletePipelineJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_pipeline_job' not in self._stubs: - self._stubs['delete_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/DeletePipelineJob', - request_serializer=pipeline_service.DeletePipelineJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_pipeline_job'] - - @property - def cancel_pipeline_job(self) -> Callable[ - [pipeline_service.CancelPipelineJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the cancel pipeline job method over gRPC. - - Cancels a PipelineJob. Starts asynchronous cancellation on the - PipelineJob. The server makes a best effort to cancel the - pipeline, but success is not guaranteed. 
Clients can use - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. On - successful cancellation, the PipelineJob is not deleted; instead - it becomes a pipeline with a - [PipelineJob.error][google.cloud.aiplatform.v1.PipelineJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [PipelineJob.state][google.cloud.aiplatform.v1.PipelineJob.state] - is set to ``CANCELLED``. - - Returns: - Callable[[~.CancelPipelineJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_pipeline_job' not in self._stubs: - self._stubs['cancel_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PipelineService/CancelPipelineJob', - request_serializer=pipeline_service.CancelPipelineJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_pipeline_job'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'PipelineServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/__init__.py deleted file mode 100644 index 13c5d11c66..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import PredictionServiceClient -from .async_client import PredictionServiceAsyncClient - -__all__ = ( - 'PredictionServiceClient', - 'PredictionServiceAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/async_client.py deleted file mode 100644 index bcc1f9aff1..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/async_client.py +++ /dev/null @@ -1,574 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api import httpbody_pb2 # type: ignore -from google.cloud.aiplatform_v1.types import explanation -from google.cloud.aiplatform_v1.types import prediction_service -from google.protobuf import any_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport -from .client import PredictionServiceClient - - -class PredictionServiceAsyncClient: - """A service for online predictions and explanations.""" - - _client: PredictionServiceClient - - DEFAULT_ENDPOINT = PredictionServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = PredictionServiceClient.DEFAULT_MTLS_ENDPOINT - - endpoint_path = staticmethod(PredictionServiceClient.endpoint_path) - parse_endpoint_path = staticmethod(PredictionServiceClient.parse_endpoint_path) - model_path = staticmethod(PredictionServiceClient.model_path) - parse_model_path = staticmethod(PredictionServiceClient.parse_model_path) - common_billing_account_path = staticmethod(PredictionServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(PredictionServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(PredictionServiceClient.common_folder_path) 
- parse_common_folder_path = staticmethod(PredictionServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(PredictionServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(PredictionServiceClient.parse_common_organization_path) - common_project_path = staticmethod(PredictionServiceClient.common_project_path) - parse_common_project_path = staticmethod(PredictionServiceClient.parse_common_project_path) - common_location_path = staticmethod(PredictionServiceClient.common_location_path) - parse_common_location_path = staticmethod(PredictionServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PredictionServiceAsyncClient: The constructed client. - """ - return PredictionServiceClient.from_service_account_info.__func__(PredictionServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PredictionServiceAsyncClient: The constructed client. - """ - return PredictionServiceClient.from_service_account_file.__func__(PredictionServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> PredictionServiceTransport: - """Returns the transport used by the client instance. 
- - Returns: - PredictionServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(PredictionServiceClient).get_transport_class, type(PredictionServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, PredictionServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the prediction service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.PredictionServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. 
- - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = PredictionServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def predict(self, - request: Union[prediction_service.PredictRequest, dict] = None, - *, - endpoint: str = None, - instances: Sequence[struct_pb2.Value] = None, - parameters: struct_pb2.Value = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> prediction_service.PredictResponse: - r"""Perform an online prediction. - - Args: - request (Union[google.cloud.aiplatform_v1.types.PredictRequest, dict]): - The request object. Request message for - [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. - endpoint (:class:`str`): - Required. The name of the Endpoint requested to serve - the prediction. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - instances (:class:`Sequence[google.protobuf.struct_pb2.Value]`): - Required. The instances that are the input to the - prediction call. A DeployedModel may have an upper limit - on the number of instances it supports per request, and - when it is exceeded the prediction call errors in case - of AutoML Models, or, in case of customer created - Models, the behaviour is as documented by that Model. - The schema of any single instance may be specified via - Endpoint's DeployedModels' - [Model's][google.cloud.aiplatform.v1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri]. 
- - This corresponds to the ``instances`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - parameters (:class:`google.protobuf.struct_pb2.Value`): - The parameters that govern the prediction. The schema of - the parameters may be specified via Endpoint's - DeployedModels' [Model's - ][google.cloud.aiplatform.v1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. - - This corresponds to the ``parameters`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.PredictResponse: - Response message for - [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, instances, parameters]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = prediction_service.PredictRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if parameters is not None: - request.parameters = parameters - if instances: - request.instances.extend(instances) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.predict, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def raw_predict(self, - request: Union[prediction_service.RawPredictRequest, dict] = None, - *, - endpoint: str = None, - http_body: httpbody_pb2.HttpBody = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> httpbody_pb2.HttpBody: - r"""Perform an online prediction with an arbitrary HTTP payload. - - The response includes the following HTTP headers: - - - ``X-Vertex-AI-Endpoint-Id``: ID of the - [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served - this prediction. - - - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's - [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] - that served this prediction. - - Args: - request (Union[google.cloud.aiplatform_v1.types.RawPredictRequest, dict]): - The request object. Request message for - [PredictionService.RawPredict][google.cloud.aiplatform.v1.PredictionService.RawPredict]. - endpoint (:class:`str`): - Required. The name of the Endpoint requested to serve - the prediction. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - http_body (:class:`google.api.httpbody_pb2.HttpBody`): - The prediction input. Supports HTTP headers and - arbitrary data payload. 
- - A - [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] - may have an upper limit on the number of instances it - supports per request. When this limit it is exceeded for - an AutoML model, the - [RawPredict][google.cloud.aiplatform.v1.PredictionService.RawPredict] - method returns an error. When this limit is exceeded for - a custom-trained model, the behavior varies depending on - the model. - - You can specify the schema for each instance in the - [predict_schemata.instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] - field when you create a - [Model][google.cloud.aiplatform.v1.Model]. This schema - applies when you deploy the ``Model`` as a - ``DeployedModel`` to an - [Endpoint][google.cloud.aiplatform.v1.Endpoint] and use - the ``RawPredict`` method. - - This corresponds to the ``http_body`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api.httpbody_pb2.HttpBody: - Message that represents an arbitrary HTTP body. It should only be used for - payload formats that can't be represented as JSON, - such as raw binary or an HTML page. - - This message can be used both in streaming and - non-streaming API methods in the request as well as - the response. - - It can be used as a top-level request field, which is - convenient if one wants to extract parameters from - either the URL or HTTP template into the request - fields and also want access to the raw HTTP body. - - Example: - - message GetResourceRequest { - // A unique request id. string request_id = 1; - - // The raw HTTP body is bound to this field. 
- google.api.HttpBody http_body = 2; - - } - - service ResourceService { - rpc GetResource(GetResourceRequest) - returns (google.api.HttpBody); - - rpc UpdateResource(google.api.HttpBody) - returns (google.protobuf.Empty); - - } - - Example with streaming methods: - - service CaldavService { - rpc GetCalendar(stream google.api.HttpBody) - returns (stream google.api.HttpBody); - - rpc UpdateCalendar(stream google.api.HttpBody) - returns (stream google.api.HttpBody); - - } - - Use of this type only changes how the request and - response bodies are handled, all other features will - continue to work unchanged. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, http_body]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = prediction_service.RawPredictRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if http_body is not None: - request.http_body = http_body - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.raw_predict, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def explain(self, - request: Union[prediction_service.ExplainRequest, dict] = None, - *, - endpoint: str = None, - instances: Sequence[struct_pb2.Value] = None, - parameters: struct_pb2.Value = None, - deployed_model_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> prediction_service.ExplainResponse: - r"""Perform an online explanation. - - If - [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id] - is specified, the corresponding DeployModel must have - [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] - populated. If - [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id] - is not specified, all DeployedModels must have - [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] - populated. Only deployed AutoML tabular Models have - explanation_spec. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ExplainRequest, dict]): - The request object. Request message for - [PredictionService.Explain][google.cloud.aiplatform.v1.PredictionService.Explain]. - endpoint (:class:`str`): - Required. The name of the Endpoint requested to serve - the explanation. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - instances (:class:`Sequence[google.protobuf.struct_pb2.Value]`): - Required. The instances that are the input to the - explanation call. A DeployedModel may have an upper - limit on the number of instances it supports per - request, and when it is exceeded the explanation call - errors in case of AutoML Models, or, in case of customer - created Models, the behaviour is as documented by that - Model. 
The schema of any single instance may be - specified via Endpoint's DeployedModels' - [Model's][google.cloud.aiplatform.v1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri]. - - This corresponds to the ``instances`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - parameters (:class:`google.protobuf.struct_pb2.Value`): - The parameters that govern the prediction. The schema of - the parameters may be specified via Endpoint's - DeployedModels' [Model's - ][google.cloud.aiplatform.v1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. - - This corresponds to the ``parameters`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_model_id (:class:`str`): - If specified, this ExplainRequest will be served by the - chosen DeployedModel, overriding - [Endpoint.traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]. - - This corresponds to the ``deployed_model_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.ExplainResponse: - Response message for - [PredictionService.Explain][google.cloud.aiplatform.v1.PredictionService.Explain]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([endpoint, instances, parameters, deployed_model_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = prediction_service.ExplainRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if parameters is not None: - request.parameters = parameters - if deployed_model_id is not None: - request.deployed_model_id = deployed_model_id - if instances: - request.instances.extend(instances) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.explain, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "PredictionServiceAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/client.py deleted file mode 100644 index 5e221745b7..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/client.py +++ /dev/null @@ -1,781 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api import httpbody_pb2 # type: ignore -from google.cloud.aiplatform_v1.types import explanation -from google.cloud.aiplatform_v1.types import prediction_service -from google.protobuf import any_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import PredictionServiceGrpcTransport -from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport - - -class PredictionServiceClientMeta(type): - """Metaclass for the PredictionService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[PredictionServiceTransport]] - _transport_registry["grpc"] = PredictionServiceGrpcTransport - _transport_registry["grpc_asyncio"] = PredictionServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[PredictionServiceTransport]: - """Returns an appropriate transport class. 
- - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class PredictionServiceClient(metaclass=PredictionServiceClientMeta): - """A service for online predictions and explanations.""" - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PredictionServiceClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PredictionServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> PredictionServiceTransport: - """Returns the transport used by the client instance. - - Returns: - PredictionServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def endpoint_path(project: str,location: str,endpoint: str,) -> str: - """Returns a fully-qualified endpoint string.""" - return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - - @staticmethod - def parse_endpoint_path(path: str) -> Dict[str,str]: - """Parses a endpoint path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_path(project: str,location: str,model: str,) -> str: - """Returns a fully-qualified model string.""" - return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - - @staticmethod - def parse_model_path(path: str) -> Dict[str,str]: - """Parses a model path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a 
fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, PredictionServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the prediction service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, PredictionServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. 
- client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. 
- if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, PredictionServiceTransport): - # transport is a PredictionServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def predict(self, - request: Union[prediction_service.PredictRequest, dict] = None, - *, - endpoint: str = None, - instances: Sequence[struct_pb2.Value] = None, - parameters: struct_pb2.Value = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> prediction_service.PredictResponse: - r"""Perform an online prediction. - - Args: - request (Union[google.cloud.aiplatform_v1.types.PredictRequest, dict]): - The request object. Request message for - [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. - endpoint (str): - Required. The name of the Endpoint requested to serve - the prediction. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - instances (Sequence[google.protobuf.struct_pb2.Value]): - Required. The instances that are the input to the - prediction call. A DeployedModel may have an upper limit - on the number of instances it supports per request, and - when it is exceeded the prediction call errors in case - of AutoML Models, or, in case of customer created - Models, the behaviour is as documented by that Model. 
- The schema of any single instance may be specified via - Endpoint's DeployedModels' - [Model's][google.cloud.aiplatform.v1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri]. - - This corresponds to the ``instances`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - parameters (google.protobuf.struct_pb2.Value): - The parameters that govern the prediction. The schema of - the parameters may be specified via Endpoint's - DeployedModels' [Model's - ][google.cloud.aiplatform.v1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. - - This corresponds to the ``parameters`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.PredictResponse: - Response message for - [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, instances, parameters]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a prediction_service.PredictRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, prediction_service.PredictRequest): - request = prediction_service.PredictRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if instances is not None: - request.instances.extend(instances) - if parameters is not None: - request.parameters = parameters - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.predict] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def raw_predict(self, - request: Union[prediction_service.RawPredictRequest, dict] = None, - *, - endpoint: str = None, - http_body: httpbody_pb2.HttpBody = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> httpbody_pb2.HttpBody: - r"""Perform an online prediction with an arbitrary HTTP payload. - - The response includes the following HTTP headers: - - - ``X-Vertex-AI-Endpoint-Id``: ID of the - [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served - this prediction. - - - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's - [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] - that served this prediction. - - Args: - request (Union[google.cloud.aiplatform_v1.types.RawPredictRequest, dict]): - The request object. Request message for - [PredictionService.RawPredict][google.cloud.aiplatform.v1.PredictionService.RawPredict]. 
- endpoint (str): - Required. The name of the Endpoint requested to serve - the prediction. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - http_body (google.api.httpbody_pb2.HttpBody): - The prediction input. Supports HTTP headers and - arbitrary data payload. - - A - [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] - may have an upper limit on the number of instances it - supports per request. When this limit it is exceeded for - an AutoML model, the - [RawPredict][google.cloud.aiplatform.v1.PredictionService.RawPredict] - method returns an error. When this limit is exceeded for - a custom-trained model, the behavior varies depending on - the model. - - You can specify the schema for each instance in the - [predict_schemata.instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] - field when you create a - [Model][google.cloud.aiplatform.v1.Model]. This schema - applies when you deploy the ``Model`` as a - ``DeployedModel`` to an - [Endpoint][google.cloud.aiplatform.v1.Endpoint] and use - the ``RawPredict`` method. - - This corresponds to the ``http_body`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api.httpbody_pb2.HttpBody: - Message that represents an arbitrary HTTP body. It should only be used for - payload formats that can't be represented as JSON, - such as raw binary or an HTML page. - - This message can be used both in streaming and - non-streaming API methods in the request as well as - the response. 
- - It can be used as a top-level request field, which is - convenient if one wants to extract parameters from - either the URL or HTTP template into the request - fields and also want access to the raw HTTP body. - - Example: - - message GetResourceRequest { - // A unique request id. string request_id = 1; - - // The raw HTTP body is bound to this field. - google.api.HttpBody http_body = 2; - - } - - service ResourceService { - rpc GetResource(GetResourceRequest) - returns (google.api.HttpBody); - - rpc UpdateResource(google.api.HttpBody) - returns (google.protobuf.Empty); - - } - - Example with streaming methods: - - service CaldavService { - rpc GetCalendar(stream google.api.HttpBody) - returns (stream google.api.HttpBody); - - rpc UpdateCalendar(stream google.api.HttpBody) - returns (stream google.api.HttpBody); - - } - - Use of this type only changes how the request and - response bodies are handled, all other features will - continue to work unchanged. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, http_body]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a prediction_service.RawPredictRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, prediction_service.RawPredictRequest): - request = prediction_service.RawPredictRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if endpoint is not None: - request.endpoint = endpoint - if http_body is not None: - request.http_body = http_body - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.raw_predict] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def explain(self, - request: Union[prediction_service.ExplainRequest, dict] = None, - *, - endpoint: str = None, - instances: Sequence[struct_pb2.Value] = None, - parameters: struct_pb2.Value = None, - deployed_model_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> prediction_service.ExplainResponse: - r"""Perform an online explanation. - - If - [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id] - is specified, the corresponding DeployModel must have - [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] - populated. If - [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id] - is not specified, all DeployedModels must have - [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] - populated. Only deployed AutoML tabular Models have - explanation_spec. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ExplainRequest, dict]): - The request object. Request message for - [PredictionService.Explain][google.cloud.aiplatform.v1.PredictionService.Explain]. - endpoint (str): - Required. The name of the Endpoint requested to serve - the explanation. 
Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - instances (Sequence[google.protobuf.struct_pb2.Value]): - Required. The instances that are the input to the - explanation call. A DeployedModel may have an upper - limit on the number of instances it supports per - request, and when it is exceeded the explanation call - errors in case of AutoML Models, or, in case of customer - created Models, the behaviour is as documented by that - Model. The schema of any single instance may be - specified via Endpoint's DeployedModels' - [Model's][google.cloud.aiplatform.v1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri]. - - This corresponds to the ``instances`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - parameters (google.protobuf.struct_pb2.Value): - The parameters that govern the prediction. The schema of - the parameters may be specified via Endpoint's - DeployedModels' [Model's - ][google.cloud.aiplatform.v1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. - - This corresponds to the ``parameters`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_model_id (str): - If specified, this ExplainRequest will be served by the - chosen DeployedModel, overriding - [Endpoint.traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]. - - This corresponds to the ``deployed_model_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.ExplainResponse: - Response message for - [PredictionService.Explain][google.cloud.aiplatform.v1.PredictionService.Explain]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, instances, parameters, deployed_model_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a prediction_service.ExplainRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, prediction_service.ExplainRequest): - request = prediction_service.ExplainRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if instances is not None: - request.instances.extend(instances) - if parameters is not None: - request.parameters = parameters - if deployed_model_id is not None: - request.deployed_model_id = deployed_model_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.explain] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! - """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "PredictionServiceClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/__init__.py deleted file mode 100644 index d747de2ce9..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -from typing import Dict, Type - -from .base import PredictionServiceTransport -from .grpc import PredictionServiceGrpcTransport -from .grpc_asyncio import PredictionServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[PredictionServiceTransport]] -_transport_registry['grpc'] = PredictionServiceGrpcTransport -_transport_registry['grpc_asyncio'] = PredictionServiceGrpcAsyncIOTransport - -__all__ = ( - 'PredictionServiceTransport', - 'PredictionServiceGrpcTransport', - 'PredictionServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py deleted file mode 100644 index a794abdf1b..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/base.py +++ /dev/null @@ -1,175 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api import httpbody_pb2 # type: ignore -from google.cloud.aiplatform_v1.types import prediction_service - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class PredictionServiceTransport(abc.ABC): - """Abstract transport class for PredictionService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. 
- scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
- self._wrapped_methods = { - self.predict: gapic_v1.method.wrap_method( - self.predict, - default_timeout=None, - client_info=client_info, - ), - self.raw_predict: gapic_v1.method.wrap_method( - self.raw_predict, - default_timeout=None, - client_info=client_info, - ), - self.explain: gapic_v1.method.wrap_method( - self.explain, - default_timeout=None, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! - """ - raise NotImplementedError() - - @property - def predict(self) -> Callable[ - [prediction_service.PredictRequest], - Union[ - prediction_service.PredictResponse, - Awaitable[prediction_service.PredictResponse] - ]]: - raise NotImplementedError() - - @property - def raw_predict(self) -> Callable[ - [prediction_service.RawPredictRequest], - Union[ - httpbody_pb2.HttpBody, - Awaitable[httpbody_pb2.HttpBody] - ]]: - raise NotImplementedError() - - @property - def explain(self) -> Callable[ - [prediction_service.ExplainRequest], - Union[ - prediction_service.ExplainResponse, - Awaitable[prediction_service.ExplainResponse] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'PredictionServiceTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py deleted file mode 100644 index 043fe5ec21..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc.py +++ /dev/null @@ -1,328 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.api import httpbody_pb2 # type: ignore -from google.cloud.aiplatform_v1.types import prediction_service -from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO - - -class PredictionServiceGrpcTransport(PredictionServiceTransport): - """gRPC backend transport for PredictionService. - - A service for online predictions and explanations. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. 
- ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def predict(self) -> Callable[ - [prediction_service.PredictRequest], - prediction_service.PredictResponse]: - r"""Return a callable for the predict method over gRPC. - - Perform an online prediction. - - Returns: - Callable[[~.PredictRequest], - ~.PredictResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'predict' not in self._stubs: - self._stubs['predict'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PredictionService/Predict', - request_serializer=prediction_service.PredictRequest.serialize, - response_deserializer=prediction_service.PredictResponse.deserialize, - ) - return self._stubs['predict'] - - @property - def raw_predict(self) -> Callable[ - [prediction_service.RawPredictRequest], - httpbody_pb2.HttpBody]: - r"""Return a callable for the raw predict method over gRPC. - - Perform an online prediction with an arbitrary HTTP payload. - - The response includes the following HTTP headers: - - - ``X-Vertex-AI-Endpoint-Id``: ID of the - [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served - this prediction. - - - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's - [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] - that served this prediction. - - Returns: - Callable[[~.RawPredictRequest], - ~.HttpBody]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'raw_predict' not in self._stubs: - self._stubs['raw_predict'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PredictionService/RawPredict', - request_serializer=prediction_service.RawPredictRequest.serialize, - response_deserializer=httpbody_pb2.HttpBody.FromString, - ) - return self._stubs['raw_predict'] - - @property - def explain(self) -> Callable[ - [prediction_service.ExplainRequest], - prediction_service.ExplainResponse]: - r"""Return a callable for the explain method over gRPC. - - Perform an online explanation. 
- - If - [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id] - is specified, the corresponding DeployModel must have - [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] - populated. If - [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id] - is not specified, all DeployedModels must have - [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] - populated. Only deployed AutoML tabular Models have - explanation_spec. - - Returns: - Callable[[~.ExplainRequest], - ~.ExplainResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'explain' not in self._stubs: - self._stubs['explain'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PredictionService/Explain', - request_serializer=prediction_service.ExplainRequest.serialize, - response_deserializer=prediction_service.ExplainResponse.deserialize, - ) - return self._stubs['explain'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'PredictionServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py deleted file mode 100644 index 875ef5c10b..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/prediction_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,332 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.api import httpbody_pb2 # type: ignore -from google.cloud.aiplatform_v1.types import prediction_service -from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import PredictionServiceGrpcTransport - - -class PredictionServiceGrpcAsyncIOTransport(PredictionServiceTransport): - """gRPC AsyncIO backend transport for PredictionService. - - A service for online predictions and explanations. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. 
- Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. 
- credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. 
If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def predict(self) -> Callable[ - [prediction_service.PredictRequest], - Awaitable[prediction_service.PredictResponse]]: - r"""Return a callable for the predict method over gRPC. - - Perform an online prediction. - - Returns: - Callable[[~.PredictRequest], - Awaitable[~.PredictResponse]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'predict' not in self._stubs: - self._stubs['predict'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PredictionService/Predict', - request_serializer=prediction_service.PredictRequest.serialize, - response_deserializer=prediction_service.PredictResponse.deserialize, - ) - return self._stubs['predict'] - - @property - def raw_predict(self) -> Callable[ - [prediction_service.RawPredictRequest], - Awaitable[httpbody_pb2.HttpBody]]: - r"""Return a callable for the raw predict method over gRPC. - - Perform an online prediction with an arbitrary HTTP payload. - - The response includes the following HTTP headers: - - - ``X-Vertex-AI-Endpoint-Id``: ID of the - [Endpoint][google.cloud.aiplatform.v1.Endpoint] that served - this prediction. - - - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's - [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] - that served this prediction. - - Returns: - Callable[[~.RawPredictRequest], - Awaitable[~.HttpBody]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'raw_predict' not in self._stubs: - self._stubs['raw_predict'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PredictionService/RawPredict', - request_serializer=prediction_service.RawPredictRequest.serialize, - response_deserializer=httpbody_pb2.HttpBody.FromString, - ) - return self._stubs['raw_predict'] - - @property - def explain(self) -> Callable[ - [prediction_service.ExplainRequest], - Awaitable[prediction_service.ExplainResponse]]: - r"""Return a callable for the explain method over gRPC. 
- - Perform an online explanation. - - If - [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id] - is specified, the corresponding DeployModel must have - [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] - populated. If - [deployed_model_id][google.cloud.aiplatform.v1.ExplainRequest.deployed_model_id] - is not specified, all DeployedModels must have - [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] - populated. Only deployed AutoML tabular Models have - explanation_spec. - - Returns: - Callable[[~.ExplainRequest], - Awaitable[~.ExplainResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'explain' not in self._stubs: - self._stubs['explain'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.PredictionService/Explain', - request_serializer=prediction_service.ExplainRequest.serialize, - response_deserializer=prediction_service.ExplainResponse.deserialize, - ) - return self._stubs['explain'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'PredictionServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/__init__.py deleted file mode 100644 index 04af59e5fa..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import SpecialistPoolServiceClient -from .async_client import SpecialistPoolServiceAsyncClient - -__all__ = ( - 'SpecialistPoolServiceClient', - 'SpecialistPoolServiceAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py deleted file mode 100644 index da26684f57..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/async_client.py +++ /dev/null @@ -1,658 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.specialist_pool_service import pagers -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.cloud.aiplatform_v1.types import specialist_pool -from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool -from google.cloud.aiplatform_v1.types import specialist_pool_service -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from .transports.base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport -from .client import SpecialistPoolServiceClient - - -class SpecialistPoolServiceAsyncClient: - """A service for creating and managing Customer SpecialistPools. - When customers start Data Labeling jobs, they can reuse/create - Specialist Pools to bring their own Specialists to label the - data. Customers can add/remove Managers for the Specialist Pool - on Cloud console, then Managers will get email notifications to - manage Specialists and tasks on CrowdCompute console. 
- """ - - _client: SpecialistPoolServiceClient - - DEFAULT_ENDPOINT = SpecialistPoolServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = SpecialistPoolServiceClient.DEFAULT_MTLS_ENDPOINT - - specialist_pool_path = staticmethod(SpecialistPoolServiceClient.specialist_pool_path) - parse_specialist_pool_path = staticmethod(SpecialistPoolServiceClient.parse_specialist_pool_path) - common_billing_account_path = staticmethod(SpecialistPoolServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(SpecialistPoolServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(SpecialistPoolServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(SpecialistPoolServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(SpecialistPoolServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(SpecialistPoolServiceClient.parse_common_organization_path) - common_project_path = staticmethod(SpecialistPoolServiceClient.common_project_path) - parse_common_project_path = staticmethod(SpecialistPoolServiceClient.parse_common_project_path) - common_location_path = staticmethod(SpecialistPoolServiceClient.common_location_path) - parse_common_location_path = staticmethod(SpecialistPoolServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - SpecialistPoolServiceAsyncClient: The constructed client. 
- """ - return SpecialistPoolServiceClient.from_service_account_info.__func__(SpecialistPoolServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - SpecialistPoolServiceAsyncClient: The constructed client. - """ - return SpecialistPoolServiceClient.from_service_account_file.__func__(SpecialistPoolServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> SpecialistPoolServiceTransport: - """Returns the transport used by the client instance. - - Returns: - SpecialistPoolServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(SpecialistPoolServiceClient).get_transport_class, type(SpecialistPoolServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, SpecialistPoolServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the specialist pool service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.SpecialistPoolServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. 
- client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = SpecialistPoolServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_specialist_pool(self, - request: Union[specialist_pool_service.CreateSpecialistPoolRequest, dict] = None, - *, - parent: str = None, - specialist_pool: gca_specialist_pool.SpecialistPool = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a SpecialistPool. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateSpecialistPoolRequest, dict]): - The request object. Request message for - [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool]. - parent (:class:`str`): - Required. 
The parent Project name for the new - SpecialistPool. The form is - ``projects/{project}/locations/{location}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - specialist_pool (:class:`google.cloud.aiplatform_v1.types.SpecialistPool`): - Required. The SpecialistPool to - create. - - This corresponds to the ``specialist_pool`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data - labeling jobs. It includes a group of specialist - managers and workers. Managers are responsible for - managing the workers in this pool as well as - customers' data labeling jobs associated with this - pool. Customers create specialist pool as well as - start data labeling jobs on Cloud, managers and - workers handle the jobs using CrowdCompute console. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, specialist_pool]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = specialist_pool_service.CreateSpecialistPoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - if specialist_pool is not None: - request.specialist_pool = specialist_pool - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_specialist_pool, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_specialist_pool.SpecialistPool, - metadata_type=specialist_pool_service.CreateSpecialistPoolOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_specialist_pool(self, - request: Union[specialist_pool_service.GetSpecialistPoolRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> specialist_pool.SpecialistPool: - r"""Gets a SpecialistPool. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetSpecialistPoolRequest, dict]): - The request object. Request message for - [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.GetSpecialistPool]. - name (:class:`str`): - Required. The name of the SpecialistPool resource. The - form is - ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.SpecialistPool: - SpecialistPool represents customers' - own workforce to work on their data - labeling jobs. It includes a group of - specialist managers and workers. - Managers are responsible for managing - the workers in this pool as well as - customers' data labeling jobs associated - with this pool. Customers create - specialist pool as well as start data - labeling jobs on Cloud, managers and - workers handle the jobs using - CrowdCompute console. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = specialist_pool_service.GetSpecialistPoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_specialist_pool, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def list_specialist_pools(self, - request: Union[specialist_pool_service.ListSpecialistPoolsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListSpecialistPoolsAsyncPager: - r"""Lists SpecialistPools in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest, dict]): - The request object. Request message for - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]. - parent (:class:`str`): - Required. The name of the SpecialistPool's parent - resource. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.specialist_pool_service.pagers.ListSpecialistPoolsAsyncPager: - Response message for - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = specialist_pool_service.ListSpecialistPoolsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_specialist_pools, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListSpecialistPoolsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_specialist_pool(self, - request: Union[specialist_pool_service.DeleteSpecialistPoolRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a SpecialistPool as well as all Specialists - in the pool. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteSpecialistPoolRequest, dict]): - The request object. Request message for - [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.DeleteSpecialistPool]. - name (:class:`str`): - Required. 
The resource name of the SpecialistPool to - delete. Format: - ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = specialist_pool_service.DeleteSpecialistPoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_specialist_pool, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def update_specialist_pool(self, - request: Union[specialist_pool_service.UpdateSpecialistPoolRequest, dict] = None, - *, - specialist_pool: gca_specialist_pool.SpecialistPool = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Updates a SpecialistPool. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateSpecialistPoolRequest, dict]): - The request object. Request message for - [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool]. - specialist_pool (:class:`google.cloud.aiplatform_v1.types.SpecialistPool`): - Required. The SpecialistPool which - replaces the resource on the server. - - This corresponds to the ``specialist_pool`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The update mask applies to - the resource. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data - labeling jobs. It includes a group of specialist - managers and workers. Managers are responsible for - managing the workers in this pool as well as - customers' data labeling jobs associated with this - pool. Customers create specialist pool as well as - start data labeling jobs on Cloud, managers and - workers handle the jobs using CrowdCompute console. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([specialist_pool, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = specialist_pool_service.UpdateSpecialistPoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if specialist_pool is not None: - request.specialist_pool = specialist_pool - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_specialist_pool, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("specialist_pool.name", request.specialist_pool.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_specialist_pool.SpecialistPool, - metadata_type=specialist_pool_service.UpdateSpecialistPoolOperationMetadata, - ) - - # Done; return the response. - return response - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "SpecialistPoolServiceAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py deleted file mode 100644 index 30a7988405..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/client.py +++ /dev/null @@ -1,856 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.specialist_pool_service import pagers -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.cloud.aiplatform_v1.types import specialist_pool -from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool -from google.cloud.aiplatform_v1.types import specialist_pool_service -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from .transports.base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import SpecialistPoolServiceGrpcTransport -from .transports.grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport - - -class SpecialistPoolServiceClientMeta(type): - """Metaclass for the SpecialistPoolService client. 
- - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[SpecialistPoolServiceTransport]] - _transport_registry["grpc"] = SpecialistPoolServiceGrpcTransport - _transport_registry["grpc_asyncio"] = SpecialistPoolServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[SpecialistPoolServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class SpecialistPoolServiceClient(metaclass=SpecialistPoolServiceClientMeta): - """A service for creating and managing Customer SpecialistPools. - When customers start Data Labeling jobs, they can reuse/create - Specialist Pools to bring their own Specialists to label the - data. Customers can add/remove Managers for the Specialist Pool - on Cloud console, then Managers will get email notifications to - manage Specialists and tasks on CrowdCompute console. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
- ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - SpecialistPoolServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - SpecialistPoolServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> SpecialistPoolServiceTransport: - """Returns the transport used by the client instance. - - Returns: - SpecialistPoolServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def specialist_pool_path(project: str,location: str,specialist_pool: str,) -> str: - """Returns a fully-qualified specialist_pool string.""" - return "projects/{project}/locations/{location}/specialistPools/{specialist_pool}".format(project=project, location=location, specialist_pool=specialist_pool, ) - - @staticmethod - def parse_specialist_pool_path(path: str) -> Dict[str,str]: - """Parses a specialist_pool path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/specialistPools/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a 
fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, SpecialistPoolServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the specialist pool service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, SpecialistPoolServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. 
- if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, SpecialistPoolServiceTransport): - # transport is a SpecialistPoolServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def create_specialist_pool(self, - request: Union[specialist_pool_service.CreateSpecialistPoolRequest, dict] = None, - *, - parent: str = None, - specialist_pool: gca_specialist_pool.SpecialistPool = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates a SpecialistPool. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateSpecialistPoolRequest, dict]): - The request object. Request message for - [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool]. - parent (str): - Required. The parent Project name for the new - SpecialistPool. The form is - ``projects/{project}/locations/{location}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool): - Required. The SpecialistPool to - create. - - This corresponds to the ``specialist_pool`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
- - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data - labeling jobs. It includes a group of specialist - managers and workers. Managers are responsible for - managing the workers in this pool as well as - customers' data labeling jobs associated with this - pool. Customers create specialist pool as well as - start data labeling jobs on Cloud, managers and - workers handle the jobs using CrowdCompute console. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, specialist_pool]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a specialist_pool_service.CreateSpecialistPoolRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, specialist_pool_service.CreateSpecialistPoolRequest): - request = specialist_pool_service.CreateSpecialistPoolRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if specialist_pool is not None: - request.specialist_pool = specialist_pool - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_specialist_pool] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_specialist_pool.SpecialistPool, - metadata_type=specialist_pool_service.CreateSpecialistPoolOperationMetadata, - ) - - # Done; return the response. - return response - - def get_specialist_pool(self, - request: Union[specialist_pool_service.GetSpecialistPoolRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> specialist_pool.SpecialistPool: - r"""Gets a SpecialistPool. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetSpecialistPoolRequest, dict]): - The request object. Request message for - [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.GetSpecialistPool]. - name (str): - Required. The name of the SpecialistPool resource. The - form is - ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.SpecialistPool: - SpecialistPool represents customers' - own workforce to work on their data - labeling jobs. It includes a group of - specialist managers and workers. - Managers are responsible for managing - the workers in this pool as well as - customers' data labeling jobs associated - with this pool. 
Customers create - specialist pool as well as start data - labeling jobs on Cloud, managers and - workers handle the jobs using - CrowdCompute console. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a specialist_pool_service.GetSpecialistPoolRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, specialist_pool_service.GetSpecialistPoolRequest): - request = specialist_pool_service.GetSpecialistPoolRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_specialist_pool] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_specialist_pools(self, - request: Union[specialist_pool_service.ListSpecialistPoolsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListSpecialistPoolsPager: - r"""Lists SpecialistPools in a Location. 
- - Args: - request (Union[google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest, dict]): - The request object. Request message for - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]. - parent (str): - Required. The name of the SpecialistPool's parent - resource. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.specialist_pool_service.pagers.ListSpecialistPoolsPager: - Response message for - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a specialist_pool_service.ListSpecialistPoolsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, specialist_pool_service.ListSpecialistPoolsRequest): - request = specialist_pool_service.ListSpecialistPoolsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_specialist_pools] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListSpecialistPoolsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_specialist_pool(self, - request: Union[specialist_pool_service.DeleteSpecialistPoolRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a SpecialistPool as well as all Specialists - in the pool. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteSpecialistPoolRequest, dict]): - The request object. Request message for - [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.DeleteSpecialistPool]. - name (str): - Required. The resource name of the SpecialistPool to - delete. Format: - ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a specialist_pool_service.DeleteSpecialistPoolRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, specialist_pool_service.DeleteSpecialistPoolRequest): - request = specialist_pool_service.DeleteSpecialistPoolRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_specialist_pool] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def update_specialist_pool(self, - request: Union[specialist_pool_service.UpdateSpecialistPoolRequest, dict] = None, - *, - specialist_pool: gca_specialist_pool.SpecialistPool = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Updates a SpecialistPool. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateSpecialistPoolRequest, dict]): - The request object. Request message for - [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool]. - specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool): - Required. The SpecialistPool which - replaces the resource on the server. - - This corresponds to the ``specialist_pool`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to - the resource. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
- - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data - labeling jobs. It includes a group of specialist - managers and workers. Managers are responsible for - managing the workers in this pool as well as - customers' data labeling jobs associated with this - pool. Customers create specialist pool as well as - start data labeling jobs on Cloud, managers and - workers handle the jobs using CrowdCompute console. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([specialist_pool, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a specialist_pool_service.UpdateSpecialistPoolRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, specialist_pool_service.UpdateSpecialistPoolRequest): - request = specialist_pool_service.UpdateSpecialistPoolRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if specialist_pool is not None: - request.specialist_pool = specialist_pool - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_specialist_pool] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("specialist_pool.name", request.specialist_pool.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_specialist_pool.SpecialistPool, - metadata_type=specialist_pool_service.UpdateSpecialistPoolOperationMetadata, - ) - - # Done; return the response. - return response - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! - """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "SpecialistPoolServiceClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/pagers.py deleted file mode 100644 index 2c11703971..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/pagers.py +++ /dev/null @@ -1,141 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.aiplatform_v1.types import specialist_pool -from google.cloud.aiplatform_v1.types import specialist_pool_service - - -class ListSpecialistPoolsPager: - """A pager for iterating through ``list_specialist_pools`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``specialist_pools`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListSpecialistPools`` requests and continue to iterate - through the ``specialist_pools`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., specialist_pool_service.ListSpecialistPoolsResponse], - request: specialist_pool_service.ListSpecialistPoolsRequest, - response: specialist_pool_service.ListSpecialistPoolsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest): - The initial request object. 
- response (google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = specialist_pool_service.ListSpecialistPoolsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[specialist_pool_service.ListSpecialistPoolsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[specialist_pool.SpecialistPool]: - for page in self.pages: - yield from page.specialist_pools - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListSpecialistPoolsAsyncPager: - """A pager for iterating through ``list_specialist_pools`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``specialist_pools`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListSpecialistPools`` requests and continue to iterate - through the ``specialist_pools`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., Awaitable[specialist_pool_service.ListSpecialistPoolsResponse]], - request: specialist_pool_service.ListSpecialistPoolsRequest, - response: specialist_pool_service.ListSpecialistPoolsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListSpecialistPoolsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListSpecialistPoolsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = specialist_pool_service.ListSpecialistPoolsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[specialist_pool_service.ListSpecialistPoolsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[specialist_pool.SpecialistPool]: - async def async_generator(): - async for page in self.pages: - for response in page.specialist_pools: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/__init__.py deleted file mode 100644 index ba8c9d7eb5..0000000000 --- 
a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import SpecialistPoolServiceTransport -from .grpc import SpecialistPoolServiceGrpcTransport -from .grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. 
-_transport_registry = OrderedDict() # type: Dict[str, Type[SpecialistPoolServiceTransport]] -_transport_registry['grpc'] = SpecialistPoolServiceGrpcTransport -_transport_registry['grpc_asyncio'] = SpecialistPoolServiceGrpcAsyncIOTransport - -__all__ = ( - 'SpecialistPoolServiceTransport', - 'SpecialistPoolServiceGrpcTransport', - 'SpecialistPoolServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py deleted file mode 100644 index d3f82c1039..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py +++ /dev/null @@ -1,210 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1.types import specialist_pool -from google.cloud.aiplatform_v1.types import specialist_pool_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class SpecialistPoolServiceTransport(abc.ABC): - """Abstract transport class for SpecialistPoolService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
- self._wrapped_methods = { - self.create_specialist_pool: gapic_v1.method.wrap_method( - self.create_specialist_pool, - default_timeout=None, - client_info=client_info, - ), - self.get_specialist_pool: gapic_v1.method.wrap_method( - self.get_specialist_pool, - default_timeout=None, - client_info=client_info, - ), - self.list_specialist_pools: gapic_v1.method.wrap_method( - self.list_specialist_pools, - default_timeout=None, - client_info=client_info, - ), - self.delete_specialist_pool: gapic_v1.method.wrap_method( - self.delete_specialist_pool, - default_timeout=None, - client_info=client_info, - ), - self.update_specialist_pool: gapic_v1.method.wrap_method( - self.update_specialist_pool, - default_timeout=None, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! - """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_specialist_pool(self) -> Callable[ - [specialist_pool_service.CreateSpecialistPoolRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_specialist_pool(self) -> Callable[ - [specialist_pool_service.GetSpecialistPoolRequest], - Union[ - specialist_pool.SpecialistPool, - Awaitable[specialist_pool.SpecialistPool] - ]]: - raise NotImplementedError() - - @property - def list_specialist_pools(self) -> Callable[ - [specialist_pool_service.ListSpecialistPoolsRequest], - Union[ - specialist_pool_service.ListSpecialistPoolsResponse, - Awaitable[specialist_pool_service.ListSpecialistPoolsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_specialist_pool(self) -> Callable[ - 
[specialist_pool_service.DeleteSpecialistPoolRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def update_specialist_pool(self) -> Callable[ - [specialist_pool_service.UpdateSpecialistPoolRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'SpecialistPoolServiceTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py deleted file mode 100644 index 57c4b761ff..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc.py +++ /dev/null @@ -1,384 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1.types import specialist_pool -from google.cloud.aiplatform_v1.types import specialist_pool_service -from google.longrunning import operations_pb2 # type: ignore -from .base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO - - -class SpecialistPoolServiceGrpcTransport(SpecialistPoolServiceTransport): - """gRPC backend transport for SpecialistPoolService. - - A service for creating and managing Customer SpecialistPools. - When customers start Data Labeling jobs, they can reuse/create - Specialist Pools to bring their own Specialists to label the - data. Customers can add/remove Managers for the Specialist Pool - on Cloud console, then Managers will get email notifications to - manage Specialists and tasks on CrowdCompute console. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. 
- ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_specialist_pool(self) -> Callable[ - [specialist_pool_service.CreateSpecialistPoolRequest], - operations_pb2.Operation]: - r"""Return a callable for the create specialist pool method over gRPC. - - Creates a SpecialistPool. 
- - Returns: - Callable[[~.CreateSpecialistPoolRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_specialist_pool' not in self._stubs: - self._stubs['create_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.SpecialistPoolService/CreateSpecialistPool', - request_serializer=specialist_pool_service.CreateSpecialistPoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_specialist_pool'] - - @property - def get_specialist_pool(self) -> Callable[ - [specialist_pool_service.GetSpecialistPoolRequest], - specialist_pool.SpecialistPool]: - r"""Return a callable for the get specialist pool method over gRPC. - - Gets a SpecialistPool. - - Returns: - Callable[[~.GetSpecialistPoolRequest], - ~.SpecialistPool]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_specialist_pool' not in self._stubs: - self._stubs['get_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.SpecialistPoolService/GetSpecialistPool', - request_serializer=specialist_pool_service.GetSpecialistPoolRequest.serialize, - response_deserializer=specialist_pool.SpecialistPool.deserialize, - ) - return self._stubs['get_specialist_pool'] - - @property - def list_specialist_pools(self) -> Callable[ - [specialist_pool_service.ListSpecialistPoolsRequest], - specialist_pool_service.ListSpecialistPoolsResponse]: - r"""Return a callable for the list specialist pools method over gRPC. 
- - Lists SpecialistPools in a Location. - - Returns: - Callable[[~.ListSpecialistPoolsRequest], - ~.ListSpecialistPoolsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_specialist_pools' not in self._stubs: - self._stubs['list_specialist_pools'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.SpecialistPoolService/ListSpecialistPools', - request_serializer=specialist_pool_service.ListSpecialistPoolsRequest.serialize, - response_deserializer=specialist_pool_service.ListSpecialistPoolsResponse.deserialize, - ) - return self._stubs['list_specialist_pools'] - - @property - def delete_specialist_pool(self) -> Callable[ - [specialist_pool_service.DeleteSpecialistPoolRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete specialist pool method over gRPC. - - Deletes a SpecialistPool as well as all Specialists - in the pool. - - Returns: - Callable[[~.DeleteSpecialistPoolRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_specialist_pool' not in self._stubs: - self._stubs['delete_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.SpecialistPoolService/DeleteSpecialistPool', - request_serializer=specialist_pool_service.DeleteSpecialistPoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_specialist_pool'] - - @property - def update_specialist_pool(self) -> Callable[ - [specialist_pool_service.UpdateSpecialistPoolRequest], - operations_pb2.Operation]: - r"""Return a callable for the update specialist pool method over gRPC. - - Updates a SpecialistPool. - - Returns: - Callable[[~.UpdateSpecialistPoolRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_specialist_pool' not in self._stubs: - self._stubs['update_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.SpecialistPoolService/UpdateSpecialistPool', - request_serializer=specialist_pool_service.UpdateSpecialistPoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_specialist_pool'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'SpecialistPoolServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py deleted file mode 100644 index 5917ce580d..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,388 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the 
Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1.types import specialist_pool -from google.cloud.aiplatform_v1.types import specialist_pool_service -from google.longrunning import operations_pb2 # type: ignore -from .base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import SpecialistPoolServiceGrpcTransport - - -class SpecialistPoolServiceGrpcAsyncIOTransport(SpecialistPoolServiceTransport): - """gRPC AsyncIO backend transport for SpecialistPoolService. - - A service for creating and managing Customer SpecialistPools. - When customers start Data Labeling jobs, they can reuse/create - Specialist Pools to bring their own Specialists to label the - data. Customers can add/remove Managers for the Specialist Pool - on Cloud console, then Managers will get email notifications to - manage Specialists and tasks on CrowdCompute console. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. 
- - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. 
- """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
- If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. 
This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_specialist_pool(self) -> Callable[ - [specialist_pool_service.CreateSpecialistPoolRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create specialist pool method over gRPC. - - Creates a SpecialistPool. - - Returns: - Callable[[~.CreateSpecialistPoolRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_specialist_pool' not in self._stubs: - self._stubs['create_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.SpecialistPoolService/CreateSpecialistPool', - request_serializer=specialist_pool_service.CreateSpecialistPoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_specialist_pool'] - - @property - def get_specialist_pool(self) -> Callable[ - [specialist_pool_service.GetSpecialistPoolRequest], - Awaitable[specialist_pool.SpecialistPool]]: - r"""Return a callable for the get specialist pool method over gRPC. - - Gets a SpecialistPool. - - Returns: - Callable[[~.GetSpecialistPoolRequest], - Awaitable[~.SpecialistPool]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_specialist_pool' not in self._stubs: - self._stubs['get_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.SpecialistPoolService/GetSpecialistPool', - request_serializer=specialist_pool_service.GetSpecialistPoolRequest.serialize, - response_deserializer=specialist_pool.SpecialistPool.deserialize, - ) - return self._stubs['get_specialist_pool'] - - @property - def list_specialist_pools(self) -> Callable[ - [specialist_pool_service.ListSpecialistPoolsRequest], - Awaitable[specialist_pool_service.ListSpecialistPoolsResponse]]: - r"""Return a callable for the list specialist pools method over gRPC. - - Lists SpecialistPools in a Location. - - Returns: - Callable[[~.ListSpecialistPoolsRequest], - Awaitable[~.ListSpecialistPoolsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_specialist_pools' not in self._stubs: - self._stubs['list_specialist_pools'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.SpecialistPoolService/ListSpecialistPools', - request_serializer=specialist_pool_service.ListSpecialistPoolsRequest.serialize, - response_deserializer=specialist_pool_service.ListSpecialistPoolsResponse.deserialize, - ) - return self._stubs['list_specialist_pools'] - - @property - def delete_specialist_pool(self) -> Callable[ - [specialist_pool_service.DeleteSpecialistPoolRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete specialist pool method over gRPC. - - Deletes a SpecialistPool as well as all Specialists - in the pool. - - Returns: - Callable[[~.DeleteSpecialistPoolRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_specialist_pool' not in self._stubs: - self._stubs['delete_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.SpecialistPoolService/DeleteSpecialistPool', - request_serializer=specialist_pool_service.DeleteSpecialistPoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_specialist_pool'] - - @property - def update_specialist_pool(self) -> Callable[ - [specialist_pool_service.UpdateSpecialistPoolRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the update specialist pool method over gRPC. - - Updates a SpecialistPool. 
- - Returns: - Callable[[~.UpdateSpecialistPoolRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_specialist_pool' not in self._stubs: - self._stubs['update_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.SpecialistPoolService/UpdateSpecialistPool', - request_serializer=specialist_pool_service.UpdateSpecialistPoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_specialist_pool'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'SpecialistPoolServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/__init__.py deleted file mode 100644 index fa8edec482..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import TensorboardServiceClient -from .async_client import TensorboardServiceAsyncClient - -__all__ = ( - 'TensorboardServiceClient', - 'TensorboardServiceAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py deleted file mode 100644 index 4253c96098..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/async_client.py +++ /dev/null @@ -1,2711 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, AsyncIterable, Awaitable, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.tensorboard_service import pagers -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.cloud.aiplatform_v1.types import tensorboard -from google.cloud.aiplatform_v1.types import tensorboard as gca_tensorboard -from google.cloud.aiplatform_v1.types import tensorboard_data -from google.cloud.aiplatform_v1.types import tensorboard_experiment -from google.cloud.aiplatform_v1.types import tensorboard_experiment as gca_tensorboard_experiment -from google.cloud.aiplatform_v1.types import tensorboard_run -from google.cloud.aiplatform_v1.types import tensorboard_run as gca_tensorboard_run -from google.cloud.aiplatform_v1.types import tensorboard_service -from google.cloud.aiplatform_v1.types import tensorboard_time_series -from google.cloud.aiplatform_v1.types import tensorboard_time_series as gca_tensorboard_time_series -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import 
TensorboardServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import TensorboardServiceGrpcAsyncIOTransport -from .client import TensorboardServiceClient - - -class TensorboardServiceAsyncClient: - """TensorboardService""" - - _client: TensorboardServiceClient - - DEFAULT_ENDPOINT = TensorboardServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = TensorboardServiceClient.DEFAULT_MTLS_ENDPOINT - - tensorboard_path = staticmethod(TensorboardServiceClient.tensorboard_path) - parse_tensorboard_path = staticmethod(TensorboardServiceClient.parse_tensorboard_path) - tensorboard_experiment_path = staticmethod(TensorboardServiceClient.tensorboard_experiment_path) - parse_tensorboard_experiment_path = staticmethod(TensorboardServiceClient.parse_tensorboard_experiment_path) - tensorboard_run_path = staticmethod(TensorboardServiceClient.tensorboard_run_path) - parse_tensorboard_run_path = staticmethod(TensorboardServiceClient.parse_tensorboard_run_path) - tensorboard_time_series_path = staticmethod(TensorboardServiceClient.tensorboard_time_series_path) - parse_tensorboard_time_series_path = staticmethod(TensorboardServiceClient.parse_tensorboard_time_series_path) - common_billing_account_path = staticmethod(TensorboardServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(TensorboardServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(TensorboardServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(TensorboardServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(TensorboardServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(TensorboardServiceClient.parse_common_organization_path) - common_project_path = staticmethod(TensorboardServiceClient.common_project_path) - parse_common_project_path = staticmethod(TensorboardServiceClient.parse_common_project_path) - common_location_path = 
staticmethod(TensorboardServiceClient.common_location_path) - parse_common_location_path = staticmethod(TensorboardServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - TensorboardServiceAsyncClient: The constructed client. - """ - return TensorboardServiceClient.from_service_account_info.__func__(TensorboardServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - TensorboardServiceAsyncClient: The constructed client. - """ - return TensorboardServiceClient.from_service_account_file.__func__(TensorboardServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> TensorboardServiceTransport: - """Returns the transport used by the client instance. - - Returns: - TensorboardServiceTransport: The transport used by the client instance. 
- """ - return self._client.transport - - get_transport_class = functools.partial(type(TensorboardServiceClient).get_transport_class, type(TensorboardServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, TensorboardServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the tensorboard service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.TensorboardServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. 
- - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = TensorboardServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_tensorboard(self, - request: Union[tensorboard_service.CreateTensorboardRequest, dict] = None, - *, - parent: str = None, - tensorboard: gca_tensorboard.Tensorboard = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a Tensorboard. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateTensorboardRequest, dict]): - The request object. Request message for - [TensorboardService.CreateTensorboard][google.cloud.aiplatform.v1.TensorboardService.CreateTensorboard]. - parent (:class:`str`): - Required. The resource name of the Location to create - the Tensorboard in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard (:class:`google.cloud.aiplatform_v1.types.Tensorboard`): - Required. The Tensorboard to create. - This corresponds to the ``tensorboard`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. 
- A default Tensorboard is provided in each region of a - GCP project. If needed users can also create extra - Tensorboards in their projects. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, tensorboard]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.CreateTensorboardRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if tensorboard is not None: - request.tensorboard = tensorboard - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_tensorboard, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_tensorboard.Tensorboard, - metadata_type=tensorboard_service.CreateTensorboardOperationMetadata, - ) - - # Done; return the response. 
- return response - - async def get_tensorboard(self, - request: Union[tensorboard_service.GetTensorboardRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard.Tensorboard: - r"""Gets a Tensorboard. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetTensorboardRequest, dict]): - The request object. Request message for - [TensorboardService.GetTensorboard][google.cloud.aiplatform.v1.TensorboardService.GetTensorboard]. - name (:class:`str`): - Required. The name of the Tensorboard resource. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Tensorboard: - Tensorboard is a physical database - that stores users' training metrics. A - default Tensorboard is provided in each - region of a GCP project. If needed users - can also create extra Tensorboards in - their projects. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.GetTensorboardRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_tensorboard, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_tensorboard(self, - request: Union[tensorboard_service.UpdateTensorboardRequest, dict] = None, - *, - tensorboard: gca_tensorboard.Tensorboard = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Updates a Tensorboard. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardRequest, dict]): - The request object. Request message for - [TensorboardService.UpdateTensorboard][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboard]. - tensorboard (:class:`google.cloud.aiplatform_v1.types.Tensorboard`): - Required. The Tensorboard's ``name`` field is used to - identify the Tensorboard to be updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - - This corresponds to the ``tensorboard`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. Field mask is used to specify the fields to be - overwritten in the Tensorboard resource by the update. - The fields specified in the update_mask are relative to - the resource, not the full request. 
A field will be - overwritten if it is in the mask. If the user does not - provide a mask then all fields will be overwritten if - new values are specified. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. - A default Tensorboard is provided in each region of a - GCP project. If needed users can also create extra - Tensorboards in their projects. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.UpdateTensorboardRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard is not None: - request.tensorboard = tensorboard - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_tensorboard, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard.name", request.tensorboard.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_tensorboard.Tensorboard, - metadata_type=tensorboard_service.UpdateTensorboardOperationMetadata, - ) - - # Done; return the response. - return response - - async def list_tensorboards(self, - request: Union[tensorboard_service.ListTensorboardsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTensorboardsAsyncPager: - r"""Lists Tensorboards in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListTensorboardsRequest, dict]): - The request object. Request message for - [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1.TensorboardService.ListTensorboards]. - parent (:class:`str`): - Required. The resource name of the Location to list - Tensorboards. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardsAsyncPager: - Response message for - [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1.TensorboardService.ListTensorboards]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.ListTensorboardsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_tensorboards, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListTensorboardsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def delete_tensorboard(self, - request: Union[tensorboard_service.DeleteTensorboardRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a Tensorboard. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardRequest, dict]): - The request object. Request message for - [TensorboardService.DeleteTensorboard][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboard]. - name (:class:`str`): - Required. The name of the Tensorboard to be deleted. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.DeleteTensorboardRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_tensorboard, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def create_tensorboard_experiment(self, - request: Union[tensorboard_service.CreateTensorboardExperimentRequest, dict] = None, - *, - parent: str = None, - tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, - tensorboard_experiment_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_experiment.TensorboardExperiment: - r"""Creates a TensorboardExperiment. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateTensorboardExperimentRequest, dict]): - The request object. 
Request message for - [TensorboardService.CreateTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardExperiment]. - parent (:class:`str`): - Required. The resource name of the Tensorboard to create - the TensorboardExperiment in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_experiment (:class:`google.cloud.aiplatform_v1.types.TensorboardExperiment`): - The TensorboardExperiment to create. - This corresponds to the ``tensorboard_experiment`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_experiment_id (:class:`str`): - Required. The ID to use for the Tensorboard experiment, - which will become the final component of the Tensorboard - experiment's resource name. - - This value should be 1-128 characters, and valid - characters are /[a-z][0-9]-/. - - This corresponds to the ``tensorboard_experiment_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.TensorboardExperiment: - A TensorboardExperiment is a group of - TensorboardRuns, that are typically the - results of a training job run, in a - Tensorboard. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, tensorboard_experiment, tensorboard_experiment_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.CreateTensorboardExperimentRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if tensorboard_experiment is not None: - request.tensorboard_experiment = tensorboard_experiment - if tensorboard_experiment_id is not None: - request.tensorboard_experiment_id = tensorboard_experiment_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_tensorboard_experiment, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_tensorboard_experiment(self, - request: Union[tensorboard_service.GetTensorboardExperimentRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_experiment.TensorboardExperiment: - r"""Gets a TensorboardExperiment. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetTensorboardExperimentRequest, dict]): - The request object. Request message for - [TensorboardService.GetTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.GetTensorboardExperiment]. - name (:class:`str`): - Required. 
The name of the TensorboardExperiment - resource. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.TensorboardExperiment: - A TensorboardExperiment is a group of - TensorboardRuns, that are typically the - results of a training job run, in a - Tensorboard. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.GetTensorboardExperimentRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_tensorboard_experiment, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def update_tensorboard_experiment(self, - request: Union[tensorboard_service.UpdateTensorboardExperimentRequest, dict] = None, - *, - tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_experiment.TensorboardExperiment: - r"""Updates a TensorboardExperiment. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardExperimentRequest, dict]): - The request object. Request message for - [TensorboardService.UpdateTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardExperiment]. - tensorboard_experiment (:class:`google.cloud.aiplatform_v1.types.TensorboardExperiment`): - Required. The TensorboardExperiment's ``name`` field is - used to identify the TensorboardExperiment to be - updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - - This corresponds to the ``tensorboard_experiment`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. Field mask is used to specify the fields to be - overwritten in the TensorboardExperiment resource by the - update. The fields specified in the update_mask are - relative to the resource, not the full request. A field - will be overwritten if it is in the mask. If the user - does not provide a mask then all fields will be - overwritten if new values are specified. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.TensorboardExperiment: - A TensorboardExperiment is a group of - TensorboardRuns, that are typically the - results of a training job run, in a - Tensorboard. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_experiment, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.UpdateTensorboardExperimentRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_experiment is not None: - request.tensorboard_experiment = tensorboard_experiment - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_tensorboard_experiment, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_experiment.name", request.tensorboard_experiment.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def list_tensorboard_experiments(self, - request: Union[tensorboard_service.ListTensorboardExperimentsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTensorboardExperimentsAsyncPager: - r"""Lists TensorboardExperiments in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListTensorboardExperimentsRequest, dict]): - The request object. Request message for - [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardExperiments]. - parent (:class:`str`): - Required. The resource name of the - Tensorboard to list - TensorboardExperiments. Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}' - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardExperimentsAsyncPager: - Response message for - [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardExperiments]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.ListTensorboardExperimentsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_tensorboard_experiments, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListTensorboardExperimentsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_tensorboard_experiment(self, - request: Union[tensorboard_service.DeleteTensorboardExperimentRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a TensorboardExperiment. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardExperimentRequest, dict]): - The request object. Request message for - [TensorboardService.DeleteTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardExperiment]. - name (:class:`str`): - Required. 
The name of the TensorboardExperiment to be - deleted. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.DeleteTensorboardExperimentRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_tensorboard_experiment, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def create_tensorboard_run(self, - request: Union[tensorboard_service.CreateTensorboardRunRequest, dict] = None, - *, - parent: str = None, - tensorboard_run: gca_tensorboard_run.TensorboardRun = None, - tensorboard_run_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_run.TensorboardRun: - r"""Creates a TensorboardRun. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateTensorboardRunRequest, dict]): - The request object. Request message for - [TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardRun]. - parent (:class:`str`): - Required. The resource name of the TensorboardExperiment - to create the TensorboardRun in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_run (:class:`google.cloud.aiplatform_v1.types.TensorboardRun`): - Required. The TensorboardRun to - create. 
- - This corresponds to the ``tensorboard_run`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_run_id (:class:`str`): - Required. The ID to use for the Tensorboard run, which - will become the final component of the Tensorboard run's - resource name. - - This value should be 1-128 characters, and valid - characters are /[a-z][0-9]-/. - - This corresponds to the ``tensorboard_run_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.TensorboardRun: - TensorboardRun maps to a specific - execution of a training job with a given - set of hyperparameter values, model - definition, dataset, etc - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, tensorboard_run, tensorboard_run_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.CreateTensorboardRunRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if tensorboard_run is not None: - request.tensorboard_run = tensorboard_run - if tensorboard_run_id is not None: - request.tensorboard_run_id = tensorboard_run_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_tensorboard_run, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def batch_create_tensorboard_runs(self, - request: Union[tensorboard_service.BatchCreateTensorboardRunsRequest, dict] = None, - *, - parent: str = None, - requests: Sequence[tensorboard_service.CreateTensorboardRunRequest] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_service.BatchCreateTensorboardRunsResponse: - r"""Batch create TensorboardRuns. - - Args: - request (Union[google.cloud.aiplatform_v1.types.BatchCreateTensorboardRunsRequest, dict]): - The request object. Request message for - [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardRuns]. - parent (:class:`str`): - Required. The resource name of the TensorboardExperiment - to create the TensorboardRuns in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - The parent field in the CreateTensorboardRunRequest - messages must match this field. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - requests (:class:`Sequence[google.cloud.aiplatform_v1.types.CreateTensorboardRunRequest]`): - Required. The request message - specifying the TensorboardRuns to - create. A maximum of 1000 - TensorboardRuns can be created in a - batch. 
- - This corresponds to the ``requests`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.BatchCreateTensorboardRunsResponse: - Response message for - [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardRuns]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, requests]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.BatchCreateTensorboardRunsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if requests: - request.requests.extend(requests) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.batch_create_tensorboard_runs, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def get_tensorboard_run(self, - request: Union[tensorboard_service.GetTensorboardRunRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_run.TensorboardRun: - r"""Gets a TensorboardRun. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetTensorboardRunRequest, dict]): - The request object. Request message for - [TensorboardService.GetTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.GetTensorboardRun]. - name (:class:`str`): - Required. The name of the TensorboardRun resource. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.TensorboardRun: - TensorboardRun maps to a specific - execution of a training job with a given - set of hyperparameter values, model - definition, dataset, etc - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.GetTensorboardRunRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_tensorboard_run, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_tensorboard_run(self, - request: Union[tensorboard_service.UpdateTensorboardRunRequest, dict] = None, - *, - tensorboard_run: gca_tensorboard_run.TensorboardRun = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_run.TensorboardRun: - r"""Updates a TensorboardRun. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardRunRequest, dict]): - The request object. Request message for - [TensorboardService.UpdateTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardRun]. - tensorboard_run (:class:`google.cloud.aiplatform_v1.types.TensorboardRun`): - Required. The TensorboardRun's ``name`` field is used to - identify the TensorboardRun to be updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``tensorboard_run`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. Field mask is used to specify the fields to be - overwritten in the TensorboardRun resource by the - update. 
The fields specified in the update_mask are - relative to the resource, not the full request. A field - will be overwritten if it is in the mask. If the user - does not provide a mask then all fields will be - overwritten if new values are specified. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.TensorboardRun: - TensorboardRun maps to a specific - execution of a training job with a given - set of hyperparameter values, model - definition, dataset, etc - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_run, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.UpdateTensorboardRunRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_run is not None: - request.tensorboard_run = tensorboard_run - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_tensorboard_run, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_run.name", request.tensorboard_run.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_tensorboard_runs(self, - request: Union[tensorboard_service.ListTensorboardRunsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTensorboardRunsAsyncPager: - r"""Lists TensorboardRuns in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListTensorboardRunsRequest, dict]): - The request object. Request message for - [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardRuns]. - parent (:class:`str`): - Required. The resource name of the - TensorboardExperiment to list - TensorboardRuns. Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}' - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardRunsAsyncPager: - Response message for - [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardRuns]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.ListTensorboardRunsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_tensorboard_runs, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListTensorboardRunsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_tensorboard_run(self, - request: Union[tensorboard_service.DeleteTensorboardRunRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a TensorboardRun. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardRunRequest, dict]): - The request object. 
Request message for - [TensorboardService.DeleteTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardRun]. - name (:class:`str`): - Required. The name of the TensorboardRun to be deleted. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.DeleteTensorboardRunRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_tensorboard_run, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def batch_create_tensorboard_time_series(self, - request: Union[tensorboard_service.BatchCreateTensorboardTimeSeriesRequest, dict] = None, - *, - parent: str = None, - requests: Sequence[tensorboard_service.CreateTensorboardTimeSeriesRequest] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_service.BatchCreateTensorboardTimeSeriesResponse: - r"""Batch create TensorboardTimeSeries that belong to a - TensorboardExperiment. - - Args: - request (Union[google.cloud.aiplatform_v1.types.BatchCreateTensorboardTimeSeriesRequest, dict]): - The request object. Request message for - [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardTimeSeries]. - parent (:class:`str`): - Required. The resource name of the TensorboardExperiment - to create the TensorboardTimeSeries in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - The TensorboardRuns referenced by the parent fields in - the CreateTensorboardTimeSeriesRequest messages must be - sub resources of this TensorboardExperiment. 
- - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - requests (:class:`Sequence[google.cloud.aiplatform_v1.types.CreateTensorboardTimeSeriesRequest]`): - Required. The request message - specifying the TensorboardTimeSeries to - create. A maximum of 1000 - TensorboardTimeSeries can be created in - a batch. - - This corresponds to the ``requests`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.BatchCreateTensorboardTimeSeriesResponse: - Response message for - [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardTimeSeries]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, requests]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.BatchCreateTensorboardTimeSeriesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if requests: - request.requests.extend(requests) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.batch_create_tensorboard_time_series, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def create_tensorboard_time_series(self, - request: Union[tensorboard_service.CreateTensorboardTimeSeriesRequest, dict] = None, - *, - parent: str = None, - tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_time_series.TensorboardTimeSeries: - r"""Creates a TensorboardTimeSeries. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateTensorboardTimeSeriesRequest, dict]): - The request object. Request message for - [TensorboardService.CreateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardTimeSeries]. - parent (:class:`str`): - Required. The resource name of the TensorboardRun to - create the TensorboardTimeSeries in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_time_series (:class:`google.cloud.aiplatform_v1.types.TensorboardTimeSeries`): - Required. The TensorboardTimeSeries - to create. - - This corresponds to the ``tensorboard_time_series`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.TensorboardTimeSeries: - TensorboardTimeSeries maps to times - series produced in training runs - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, tensorboard_time_series]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.CreateTensorboardTimeSeriesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if tensorboard_time_series is not None: - request.tensorboard_time_series = tensorboard_time_series - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_tensorboard_time_series, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def get_tensorboard_time_series(self, - request: Union[tensorboard_service.GetTensorboardTimeSeriesRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_time_series.TensorboardTimeSeries: - r"""Gets a TensorboardTimeSeries. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetTensorboardTimeSeriesRequest, dict]): - The request object. Request message for - [TensorboardService.GetTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.GetTensorboardTimeSeries]. - name (:class:`str`): - Required. The name of the TensorboardTimeSeries - resource. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.TensorboardTimeSeries: - TensorboardTimeSeries maps to times - series produced in training runs - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.GetTensorboardTimeSeriesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_tensorboard_time_series, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_tensorboard_time_series(self, - request: Union[tensorboard_service.UpdateTensorboardTimeSeriesRequest, dict] = None, - *, - tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_time_series.TensorboardTimeSeries: - r"""Updates a TensorboardTimeSeries. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardTimeSeriesRequest, dict]): - The request object. Request message for - [TensorboardService.UpdateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardTimeSeries]. - tensorboard_time_series (:class:`google.cloud.aiplatform_v1.types.TensorboardTimeSeries`): - Required. The TensorboardTimeSeries' ``name`` field is - used to identify the TensorboardTimeSeries to be - updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``tensorboard_time_series`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. Field mask is used to specify the fields to be - overwritten in the TensorboardTimeSeries resource by the - update. The fields specified in the update_mask are - relative to the resource, not the full request. A field - will be overwritten if it is in the mask. If the user - does not provide a mask then all fields will be - overwritten if new values are specified. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.TensorboardTimeSeries: - TensorboardTimeSeries maps to times - series produced in training runs - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_time_series, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.UpdateTensorboardTimeSeriesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_time_series is not None: - request.tensorboard_time_series = tensorboard_time_series - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_tensorboard_time_series, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_time_series.name", request.tensorboard_time_series.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_tensorboard_time_series(self, - request: Union[tensorboard_service.ListTensorboardTimeSeriesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTensorboardTimeSeriesAsyncPager: - r"""Lists TensorboardTimeSeries in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListTensorboardTimeSeriesRequest, dict]): - The request object. Request message for - [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardTimeSeries]. - parent (:class:`str`): - Required. The resource name of the - TensorboardRun to list - TensorboardTimeSeries. Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}' - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardTimeSeriesAsyncPager: - Response message for - [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardTimeSeries]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.ListTensorboardTimeSeriesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_tensorboard_time_series, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListTensorboardTimeSeriesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def delete_tensorboard_time_series(self, - request: Union[tensorboard_service.DeleteTensorboardTimeSeriesRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a TensorboardTimeSeries. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardTimeSeriesRequest, dict]): - The request object. Request message for - [TensorboardService.DeleteTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardTimeSeries]. - name (:class:`str`): - Required. The name of the TensorboardTimeSeries to be - deleted. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.DeleteTensorboardTimeSeriesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_tensorboard_time_series, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def batch_read_tensorboard_time_series_data(self, - request: Union[tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest, dict] = None, - *, - tensorboard: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse: - r"""Reads multiple TensorboardTimeSeries' data. The data - point number limit is 1000 for scalars, 100 for tensors - and blob references. 
If the number of data points stored - is less than the limit, all data will be returned. - Otherwise, that limit number of data points will be - randomly selected from this time series and returned. - - Args: - request (Union[google.cloud.aiplatform_v1.types.BatchReadTensorboardTimeSeriesDataRequest, dict]): - The request object. Request message for - [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.BatchReadTensorboardTimeSeriesData]. - tensorboard (:class:`str`): - Required. The resource name of the Tensorboard - containing TensorboardTimeSeries to read data from. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}``. - The TensorboardTimeSeries referenced by - [time_series][google.cloud.aiplatform.v1.BatchReadTensorboardTimeSeriesDataRequest.time_series] - must be sub resources of this Tensorboard. - - This corresponds to the ``tensorboard`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.BatchReadTensorboardTimeSeriesDataResponse: - Response message for - [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.BatchReadTensorboardTimeSeriesData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
async def read_tensorboard_time_series_data(self,
        request: Union[tensorboard_service.ReadTensorboardTimeSeriesDataRequest, dict] = None,
        *,
        tensorboard_time_series: str = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> tensorboard_service.ReadTensorboardTimeSeriesDataResponse:
    r"""Reads a TensorboardTimeSeries' data. By default, if the number
    of data points stored is less than 1000, all data will be
    returned. Otherwise, 1000 data points will be randomly selected
    from this time series and returned. This value can be changed by
    changing max_data_points, which can't be greater than 10k.

    Args:
        request (Union[google.cloud.aiplatform_v1.types.ReadTensorboardTimeSeriesDataRequest, dict]):
            The request object. Request message for
            [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardTimeSeriesData].
        tensorboard_time_series (:class:`str`):
            Required. The resource name of the TensorboardTimeSeries
            to read data from. Format:
            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}``

            This corresponds to the ``tensorboard_time_series`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.aiplatform_v1.types.ReadTensorboardTimeSeriesDataResponse:
            Response message for
            [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardTimeSeriesData].

    """
    # A full request object and flattened field arguments are mutually
    # exclusive; reject calls that mix the two.
    if request is not None and any([tensorboard_time_series]):
        raise ValueError("If the `request` argument is set, then none of "
                         "the individual field arguments should be set.")

    # Coerce the incoming request (proto message or dict) to the proto type.
    request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest(request)

    # Apply flattened field arguments onto the request.
    if tensorboard_time_series is not None:
        request.tensorboard_time_series = tensorboard_time_series

    # Wrap the transport method with retry/timeout and friendly error
    # translation.
    wrapped_rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.read_tensorboard_time_series_data,
        default_timeout=None,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # Route the call by resource name via the gRPC metadata header.
    routing_metadata = gapic_v1.routing_header.to_grpc_metadata(
        (("tensorboard_time_series", request.tensorboard_time_series),)
    )
    metadata = tuple(metadata) + (routing_metadata,)

    # Issue the request and return the response.
    return await wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
def read_tensorboard_blob_data(self,
        request: Union[tensorboard_service.ReadTensorboardBlobDataRequest, dict] = None,
        *,
        time_series: str = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> Awaitable[AsyncIterable[tensorboard_service.ReadTensorboardBlobDataResponse]]:
    r"""Gets bytes of TensorboardBlobs.
    This is to allow reading blob data stored in consumer
    project's Cloud Storage bucket without users having to
    obtain Cloud Storage access permission.

    Args:
        request (Union[google.cloud.aiplatform_v1.types.ReadTensorboardBlobDataRequest, dict]):
            The request object. Request message for
            [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardBlobData].
        time_series (:class:`str`):
            Required. The resource name of the TensorboardTimeSeries
            to list Blobs. Format:
            'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}'

            This corresponds to the ``time_series`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        AsyncIterable[google.cloud.aiplatform_v1.types.ReadTensorboardBlobDataResponse]:
            Response message for
            [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardBlobData].

    """
    # A full request object and flattened field arguments are mutually
    # exclusive; reject calls that mix the two.
    if request is not None and any([time_series]):
        raise ValueError("If the `request` argument is set, then none of "
                         "the individual field arguments should be set.")

    # Coerce the incoming request (proto message or dict) to the proto type.
    request = tensorboard_service.ReadTensorboardBlobDataRequest(request)

    # Apply flattened field arguments onto the request.
    if time_series is not None:
        request.time_series = time_series

    # Wrap the transport method with retry/timeout and friendly error
    # translation.
    wrapped_rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.read_tensorboard_blob_data,
        default_timeout=None,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # Route the call by resource name via the gRPC metadata header.
    routing_metadata = gapic_v1.routing_header.to_grpc_metadata(
        (("time_series", request.time_series),)
    )
    metadata = tuple(metadata) + (routing_metadata,)

    # Server-streaming call: return the stream without awaiting it here.
    return wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
async def write_tensorboard_experiment_data(self,
        request: Union[tensorboard_service.WriteTensorboardExperimentDataRequest, dict] = None,
        *,
        tensorboard_experiment: str = None,
        write_run_data_requests: Sequence[tensorboard_service.WriteTensorboardRunDataRequest] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> tensorboard_service.WriteTensorboardExperimentDataResponse:
    r"""Write time series data points of multiple
    TensorboardTimeSeries in multiple TensorboardRun's. If
    any data fail to be ingested, an error will be returned.

    Args:
        request (Union[google.cloud.aiplatform_v1.types.WriteTensorboardExperimentDataRequest, dict]):
            The request object. Request message for
            [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardExperimentData].
        tensorboard_experiment (:class:`str`):
            Required. The resource name of the TensorboardExperiment
            to write data to. Format:
            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}``

            This corresponds to the ``tensorboard_experiment`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        write_run_data_requests (:class:`Sequence[google.cloud.aiplatform_v1.types.WriteTensorboardRunDataRequest]`):
            Required. Requests containing per-run
            TensorboardTimeSeries data to write.

            This corresponds to the ``write_run_data_requests`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.aiplatform_v1.types.WriteTensorboardExperimentDataResponse:
            Response message for
            [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardExperimentData].

    """
    # A full request object and flattened field arguments are mutually
    # exclusive; reject calls that mix the two.
    if request is not None and any([tensorboard_experiment, write_run_data_requests]):
        raise ValueError("If the `request` argument is set, then none of "
                         "the individual field arguments should be set.")

    # Coerce the incoming request (proto message or dict) to the proto type.
    request = tensorboard_service.WriteTensorboardExperimentDataRequest(request)

    # Apply flattened field arguments onto the request.
    if tensorboard_experiment is not None:
        request.tensorboard_experiment = tensorboard_experiment
    if write_run_data_requests:
        request.write_run_data_requests.extend(write_run_data_requests)

    # Wrap the transport method with retry/timeout and friendly error
    # translation.
    wrapped_rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.write_tensorboard_experiment_data,
        default_timeout=None,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # Route the call by resource name via the gRPC metadata header.
    routing_metadata = gapic_v1.routing_header.to_grpc_metadata(
        (("tensorboard_experiment", request.tensorboard_experiment),)
    )
    metadata = tuple(metadata) + (routing_metadata,)

    # Issue the request and return the response.
    return await wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
async def write_tensorboard_run_data(self,
        request: Union[tensorboard_service.WriteTensorboardRunDataRequest, dict] = None,
        *,
        tensorboard_run: str = None,
        time_series_data: Sequence[tensorboard_data.TimeSeriesData] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> tensorboard_service.WriteTensorboardRunDataResponse:
    r"""Write time series data points into multiple
    TensorboardTimeSeries under a TensorboardRun. If any
    data fail to be ingested, an error will be returned.

    Args:
        request (Union[google.cloud.aiplatform_v1.types.WriteTensorboardRunDataRequest, dict]):
            The request object. Request message for
            [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardRunData].
        tensorboard_run (:class:`str`):
            Required. The resource name of the TensorboardRun to
            write data to. Format:
            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}``

            This corresponds to the ``tensorboard_run`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        time_series_data (:class:`Sequence[google.cloud.aiplatform_v1.types.TimeSeriesData]`):
            Required. The TensorboardTimeSeries
            data to write. Values with in a time
            series are indexed by their step value.
            Repeated writes to the same step will
            overwrite the existing value for that
            step.
            The upper limit of data points per write
            request is 5000.

            This corresponds to the ``time_series_data`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.aiplatform_v1.types.WriteTensorboardRunDataResponse:
            Response message for
            [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardRunData].

    """
    # A full request object and flattened field arguments are mutually
    # exclusive; reject calls that mix the two.
    if request is not None and any([tensorboard_run, time_series_data]):
        raise ValueError("If the `request` argument is set, then none of "
                         "the individual field arguments should be set.")

    # Coerce the incoming request (proto message or dict) to the proto type.
    request = tensorboard_service.WriteTensorboardRunDataRequest(request)

    # Apply flattened field arguments onto the request.
    if tensorboard_run is not None:
        request.tensorboard_run = tensorboard_run
    if time_series_data:
        request.time_series_data.extend(time_series_data)

    # Wrap the transport method with retry/timeout and friendly error
    # translation.
    wrapped_rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.write_tensorboard_run_data,
        default_timeout=None,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # Route the call by resource name via the gRPC metadata header.
    routing_metadata = gapic_v1.routing_header.to_grpc_metadata(
        (("tensorboard_run", request.tensorboard_run),)
    )
    metadata = tuple(metadata) + (routing_metadata,)

    # Issue the request and return the response.
    return await wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
async def export_tensorboard_time_series_data(self,
        request: Union[tensorboard_service.ExportTensorboardTimeSeriesDataRequest, dict] = None,
        *,
        tensorboard_time_series: str = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> pagers.ExportTensorboardTimeSeriesDataAsyncPager:
    r"""Exports a TensorboardTimeSeries' data. Data is
    returned in paginated responses.

    Args:
        request (Union[google.cloud.aiplatform_v1.types.ExportTensorboardTimeSeriesDataRequest, dict]):
            The request object. Request message for
            [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ExportTensorboardTimeSeriesData].
        tensorboard_time_series (:class:`str`):
            Required. The resource name of the TensorboardTimeSeries
            to export data from. Format:
            ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}``

            This corresponds to the ``tensorboard_time_series`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ExportTensorboardTimeSeriesDataAsyncPager:
            Response message for
            [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ExportTensorboardTimeSeriesData].

            Iterating over this object will yield results and
            resolve additional pages automatically.

    """
    # A full request object and flattened field arguments are mutually
    # exclusive; reject calls that mix the two.
    if request is not None and any([tensorboard_time_series]):
        raise ValueError("If the `request` argument is set, then none of "
                         "the individual field arguments should be set.")

    # Coerce the incoming request (proto message or dict) to the proto type.
    request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest(request)

    # Apply flattened field arguments onto the request.
    if tensorboard_time_series is not None:
        request.tensorboard_time_series = tensorboard_time_series

    # Wrap the transport method with retry/timeout and friendly error
    # translation.
    wrapped_rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.export_tensorboard_time_series_data,
        default_timeout=None,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # Route the call by resource name via the gRPC metadata header.
    routing_metadata = gapic_v1.routing_header.to_grpc_metadata(
        (("tensorboard_time_series", request.tensorboard_time_series),)
    )
    metadata = tuple(metadata) + (routing_metadata,)

    # Issue the request.
    response = await wrapped_rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )

    # The method is paged: wrap the response in a pager that exposes
    # an `__aiter__` convenience method resolving further pages.
    return pagers.ExportTensorboardTimeSeriesDataAsyncPager(
        method=wrapped_rpc,
        request=request,
        response=response,
        metadata=metadata,
    )
- return response - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "TensorboardServiceAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/client.py deleted file mode 100644 index 4ae2c26d6d..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/client.py +++ /dev/null @@ -1,2936 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Iterable, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.tensorboard_service import pagers -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.cloud.aiplatform_v1.types import tensorboard -from google.cloud.aiplatform_v1.types import tensorboard as gca_tensorboard -from google.cloud.aiplatform_v1.types import tensorboard_data -from google.cloud.aiplatform_v1.types import tensorboard_experiment -from google.cloud.aiplatform_v1.types import tensorboard_experiment as gca_tensorboard_experiment -from google.cloud.aiplatform_v1.types import tensorboard_run -from google.cloud.aiplatform_v1.types import tensorboard_run as gca_tensorboard_run -from google.cloud.aiplatform_v1.types import tensorboard_service -from google.cloud.aiplatform_v1.types import tensorboard_time_series -from google.cloud.aiplatform_v1.types import tensorboard_time_series as gca_tensorboard_time_series -from google.protobuf 
import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import TensorboardServiceGrpcTransport -from .transports.grpc_asyncio import TensorboardServiceGrpcAsyncIOTransport - - -class TensorboardServiceClientMeta(type): - """Metaclass for the TensorboardService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[TensorboardServiceTransport]] - _transport_registry["grpc"] = TensorboardServiceGrpcTransport - _transport_registry["grpc_asyncio"] = TensorboardServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[TensorboardServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class TensorboardServiceClient(metaclass=TensorboardServiceClientMeta): - """TensorboardService""" - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. 
- """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - TensorboardServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - TensorboardServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> TensorboardServiceTransport: - """Returns the transport used by the client instance. 
@staticmethod
def tensorboard_path(project: str, location: str, tensorboard: str) -> str:
    """Return a fully-qualified tensorboard resource name."""
    return f"projects/{project}/locations/{location}/tensorboards/{tensorboard}"

@staticmethod
def parse_tensorboard_path(path: str) -> Dict[str, str]:
    """Parse a tensorboard path into its component segments."""
    matched = re.match(
        r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/tensorboards/(?P<tensorboard>.+?)$",
        path,
    )
    return matched.groupdict() if matched else {}

@staticmethod
def tensorboard_experiment_path(project: str, location: str, tensorboard: str, experiment: str) -> str:
    """Return a fully-qualified tensorboard_experiment resource name."""
    return (
        f"projects/{project}/locations/{location}"
        f"/tensorboards/{tensorboard}/experiments/{experiment}"
    )

@staticmethod
def parse_tensorboard_experiment_path(path: str) -> Dict[str, str]:
    """Parse a tensorboard_experiment path into its component segments."""
    matched = re.match(
        r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/tensorboards/(?P<tensorboard>.+?)/experiments/(?P<experiment>.+?)$",
        path,
    )
    return matched.groupdict() if matched else {}

@staticmethod
def tensorboard_run_path(project: str, location: str, tensorboard: str, experiment: str, run: str) -> str:
    """Return a fully-qualified tensorboard_run resource name."""
    return (
        f"projects/{project}/locations/{location}"
        f"/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}"
    )

@staticmethod
def parse_tensorboard_run_path(path: str) -> Dict[str, str]:
    """Parse a tensorboard_run path into its component segments."""
    matched = re.match(
        r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/tensorboards/(?P<tensorboard>.+?)/experiments/(?P<experiment>.+?)/runs/(?P<run>.+?)$",
        path,
    )
    return matched.groupdict() if matched else {}

@staticmethod
def tensorboard_time_series_path(project: str, location: str, tensorboard: str, experiment: str, run: str, time_series: str) -> str:
    """Return a fully-qualified tensorboard_time_series resource name."""
    return (
        f"projects/{project}/locations/{location}"
        f"/tensorboards/{tensorboard}/experiments/{experiment}"
        f"/runs/{run}/timeSeries/{time_series}"
    )

@staticmethod
def parse_tensorboard_time_series_path(path: str) -> Dict[str, str]:
    """Parse a tensorboard_time_series path into its component segments."""
    matched = re.match(
        r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/tensorboards/(?P<tensorboard>.+?)/experiments/(?P<experiment>.+?)/runs/(?P<run>.+?)/timeSeries/(?P<time_series>.+?)$",
        path,
    )
    return matched.groupdict() if matched else {}

@staticmethod
def common_billing_account_path(billing_account: str) -> str:
    """Return a fully-qualified billing_account string."""
    return f"billingAccounts/{billing_account}"

@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
    """Parse a billing_account path into its component segments."""
    matched = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
    return matched.groupdict() if matched else {}

@staticmethod
def common_folder_path(folder: str) -> str:
    """Return a fully-qualified folder string."""
    return f"folders/{folder}"

@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
    """Parse a folder path into its component segments."""
    matched = re.match(r"^folders/(?P<folder>.+?)$", path)
    return matched.groupdict() if matched else {}

@staticmethod
def common_organization_path(organization: str) -> str:
    """Return a fully-qualified organization string."""
    return f"organizations/{organization}"
"organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, TensorboardServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the tensorboard service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, TensorboardServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. 
def __init__(self, *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[str, TensorboardServiceTransport, None] = None,
        client_options: Optional[client_options_lib.ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        ) -> None:
    """Instantiate the tensorboard service client.

    Args:
        credentials (Optional[google.auth.credentials.Credentials]): The
            authorization credentials to attach to requests. These
            credentials identify the application to the service; if none
            are specified, the client will attempt to ascertain the
            credentials from the environment.
        transport (Union[str, TensorboardServiceTransport]): The
            transport to use. If set to None, a transport is chosen
            automatically.
        client_options (google.api_core.client_options.ClientOptions): Custom options for the
            client. It won't take effect if a ``transport`` instance is provided.
            (1) The ``api_endpoint`` property can be used to override the
            default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
            environment variable can also be used to override the endpoint:
            "always" (always use the default mTLS endpoint), "never" (always
            use the default regular endpoint) and "auto" (auto switch to the
            default mTLS endpoint if client certificate is present, this is
            the default value). However, the ``api_endpoint`` property takes
            precedence if provided.
            (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
            is "true", then the ``client_cert_source`` property can be used
            to provide client certificate for mutual TLS transport. If
            not provided, the default SSL client certificate will be used if
            present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
            set, no client certificate will be used.
        client_info (google.api_core.gapic_v1.client_info.ClientInfo):
            The client info used to send a user-agent string along with
            API requests. If ``None``, then default info will be used.
            Generally, you only need to set this if you're developing
            your own client library.

    Raises:
        google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
            creation failed for any reason.
    """
    # Normalize client_options into a ClientOptions instance.
    if isinstance(client_options, dict):
        client_options = client_options_lib.from_dict(client_options)
    if client_options is None:
        client_options = client_options_lib.ClientOptions()

    # Determine whether a client certificate should be used for mTLS,
    # per the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable.
    use_cert_env = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
    if use_cert_env not in ("true", "false"):
        raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`")
    use_client_cert = use_cert_env == "true"

    client_cert_source_func = None
    is_mtls = False
    if use_client_cert:
        if client_options.client_cert_source:
            # An explicit cert source always wins.
            is_mtls = True
            client_cert_source_func = client_options.client_cert_source
        else:
            # Fall back to the default client certificate, if any.
            is_mtls = mtls.has_default_client_cert_source()
            client_cert_source_func = (
                mtls.default_client_cert_source() if is_mtls else None
            )

    # Figure out which API endpoint to use, honoring an explicit
    # override first, then GOOGLE_API_USE_MTLS_ENDPOINT.
    if client_options.api_endpoint is not None:
        api_endpoint = client_options.api_endpoint
    else:
        use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
        if use_mtls_env == "never":
            api_endpoint = self.DEFAULT_ENDPOINT
        elif use_mtls_env == "always":
            api_endpoint = self.DEFAULT_MTLS_ENDPOINT
        elif use_mtls_env == "auto":
            api_endpoint = (
                self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
            )
        else:
            raise MutualTLSChannelError(
                "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
                "values: never, auto, always"
            )

    # Save or instantiate the transport. Accepting a ready-made transport
    # instance is an extensibility point for unusual situations.
    if isinstance(transport, TensorboardServiceTransport):
        # The transport instance carries its own credentials and scopes;
        # competing configuration is rejected.
        if credentials or client_options.credentials_file:
            raise ValueError("When providing a transport instance, "
                             "provide its credentials directly.")
        if client_options.scopes:
            raise ValueError(
                "When providing a transport instance, provide its scopes "
                "directly."
            )
        self._transport = transport
    else:
        Transport = type(self).get_transport_class(transport)
        self._transport = Transport(
            credentials=credentials,
            credentials_file=client_options.credentials_file,
            host=api_endpoint,
            scopes=client_options.scopes,
            client_cert_source_for_mtls=client_cert_source_func,
            quota_project_id=client_options.quota_project_id,
            client_info=client_info,
            always_use_jwt_access=True,
        )
- - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. - A default Tensorboard is provided in each region of a - GCP project. If needed users can also create extra - Tensorboards in their projects. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, tensorboard]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.CreateTensorboardRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.CreateTensorboardRequest): - request = tensorboard_service.CreateTensorboardRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if tensorboard is not None: - request.tensorboard = tensorboard - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_tensorboard] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
- response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_tensorboard.Tensorboard, - metadata_type=tensorboard_service.CreateTensorboardOperationMetadata, - ) - - # Done; return the response. - return response - - def get_tensorboard(self, - request: Union[tensorboard_service.GetTensorboardRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard.Tensorboard: - r"""Gets a Tensorboard. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetTensorboardRequest, dict]): - The request object. Request message for - [TensorboardService.GetTensorboard][google.cloud.aiplatform.v1.TensorboardService.GetTensorboard]. - name (str): - Required. The name of the Tensorboard resource. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Tensorboard: - Tensorboard is a physical database - that stores users' training metrics. A - default Tensorboard is provided in each - region of a GCP project. If needed users - can also create extra Tensorboards in - their projects. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.GetTensorboardRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.GetTensorboardRequest): - request = tensorboard_service.GetTensorboardRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_tensorboard] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_tensorboard(self, - request: Union[tensorboard_service.UpdateTensorboardRequest, dict] = None, - *, - tensorboard: gca_tensorboard.Tensorboard = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Updates a Tensorboard. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardRequest, dict]): - The request object. Request message for - [TensorboardService.UpdateTensorboard][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboard]. - tensorboard (google.cloud.aiplatform_v1.types.Tensorboard): - Required. 
The Tensorboard's ``name`` field is used to - identify the Tensorboard to be updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - - This corresponds to the ``tensorboard`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. Field mask is used to specify the fields to be - overwritten in the Tensorboard resource by the update. - The fields specified in the update_mask are relative to - the resource, not the full request. A field will be - overwritten if it is in the mask. If the user does not - provide a mask then all fields will be overwritten if - new values are specified. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. - A default Tensorboard is provided in each region of a - GCP project. If needed users can also create extra - Tensorboards in their projects. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([tensorboard, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.UpdateTensorboardRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.UpdateTensorboardRequest): - request = tensorboard_service.UpdateTensorboardRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard is not None: - request.tensorboard = tensorboard - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_tensorboard] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard.name", request.tensorboard.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_tensorboard.Tensorboard, - metadata_type=tensorboard_service.UpdateTensorboardOperationMetadata, - ) - - # Done; return the response. - return response - - def list_tensorboards(self, - request: Union[tensorboard_service.ListTensorboardsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTensorboardsPager: - r"""Lists Tensorboards in a Location. 
- - Args: - request (Union[google.cloud.aiplatform_v1.types.ListTensorboardsRequest, dict]): - The request object. Request message for - [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1.TensorboardService.ListTensorboards]. - parent (str): - Required. The resource name of the Location to list - Tensorboards. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardsPager: - Response message for - [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1.TensorboardService.ListTensorboards]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.ListTensorboardsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.ListTensorboardsRequest): - request = tensorboard_service.ListTensorboardsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_tensorboards] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListTensorboardsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_tensorboard(self, - request: Union[tensorboard_service.DeleteTensorboardRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a Tensorboard. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardRequest, dict]): - The request object. Request message for - [TensorboardService.DeleteTensorboard][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboard]. - name (str): - Required. The name of the Tensorboard to be deleted. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.DeleteTensorboardRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.DeleteTensorboardRequest): - request = tensorboard_service.DeleteTensorboardRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
- response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def create_tensorboard_experiment(self, - request: Union[tensorboard_service.CreateTensorboardExperimentRequest, dict] = None, - *, - parent: str = None, - tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, - tensorboard_experiment_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_experiment.TensorboardExperiment: - r"""Creates a TensorboardExperiment. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateTensorboardExperimentRequest, dict]): - The request object. Request message for - [TensorboardService.CreateTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardExperiment]. - parent (str): - Required. The resource name of the Tensorboard to create - the TensorboardExperiment in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_experiment (google.cloud.aiplatform_v1.types.TensorboardExperiment): - The TensorboardExperiment to create. - This corresponds to the ``tensorboard_experiment`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_experiment_id (str): - Required. The ID to use for the Tensorboard experiment, - which will become the final component of the Tensorboard - experiment's resource name. - - This value should be 1-128 characters, and valid - characters are /[a-z][0-9]-/. - - This corresponds to the ``tensorboard_experiment_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.TensorboardExperiment: - A TensorboardExperiment is a group of - TensorboardRuns, that are typically the - results of a training job run, in a - Tensorboard. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, tensorboard_experiment, tensorboard_experiment_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.CreateTensorboardExperimentRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.CreateTensorboardExperimentRequest): - request = tensorboard_service.CreateTensorboardExperimentRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if tensorboard_experiment is not None: - request.tensorboard_experiment = tensorboard_experiment - if tensorboard_experiment_id is not None: - request.tensorboard_experiment_id = tensorboard_experiment_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_tensorboard_experiment] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_tensorboard_experiment(self, - request: Union[tensorboard_service.GetTensorboardExperimentRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_experiment.TensorboardExperiment: - r"""Gets a TensorboardExperiment. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetTensorboardExperimentRequest, dict]): - The request object. Request message for - [TensorboardService.GetTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.GetTensorboardExperiment]. - name (str): - Required. The name of the TensorboardExperiment - resource. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.TensorboardExperiment: - A TensorboardExperiment is a group of - TensorboardRuns, that are typically the - results of a training job run, in a - Tensorboard. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.GetTensorboardExperimentRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.GetTensorboardExperimentRequest): - request = tensorboard_service.GetTensorboardExperimentRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_tensorboard_experiment] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_tensorboard_experiment(self, - request: Union[tensorboard_service.UpdateTensorboardExperimentRequest, dict] = None, - *, - tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_experiment.TensorboardExperiment: - r"""Updates a TensorboardExperiment. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardExperimentRequest, dict]): - The request object. 
Request message for - [TensorboardService.UpdateTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardExperiment]. - tensorboard_experiment (google.cloud.aiplatform_v1.types.TensorboardExperiment): - Required. The TensorboardExperiment's ``name`` field is - used to identify the TensorboardExperiment to be - updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - - This corresponds to the ``tensorboard_experiment`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. Field mask is used to specify the fields to be - overwritten in the TensorboardExperiment resource by the - update. The fields specified in the update_mask are - relative to the resource, not the full request. A field - will be overwritten if it is in the mask. If the user - does not provide a mask then all fields will be - overwritten if new values are specified. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.TensorboardExperiment: - A TensorboardExperiment is a group of - TensorboardRuns, that are typically the - results of a training job run, in a - Tensorboard. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([tensorboard_experiment, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.UpdateTensorboardExperimentRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.UpdateTensorboardExperimentRequest): - request = tensorboard_service.UpdateTensorboardExperimentRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_experiment is not None: - request.tensorboard_experiment = tensorboard_experiment - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_tensorboard_experiment] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_experiment.name", request.tensorboard_experiment.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_tensorboard_experiments(self, - request: Union[tensorboard_service.ListTensorboardExperimentsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTensorboardExperimentsPager: - r"""Lists TensorboardExperiments in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListTensorboardExperimentsRequest, dict]): - The request object. 
Request message for - [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardExperiments]. - parent (str): - Required. The resource name of the - Tensorboard to list - TensorboardExperiments. Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}' - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardExperimentsPager: - Response message for - [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardExperiments]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.ListTensorboardExperimentsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.ListTensorboardExperimentsRequest): - request = tensorboard_service.ListTensorboardExperimentsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_tensorboard_experiments] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListTensorboardExperimentsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_tensorboard_experiment(self, - request: Union[tensorboard_service.DeleteTensorboardExperimentRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a TensorboardExperiment. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardExperimentRequest, dict]): - The request object. Request message for - [TensorboardService.DeleteTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardExperiment]. - name (str): - Required. The name of the TensorboardExperiment to be - deleted. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.DeleteTensorboardExperimentRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.DeleteTensorboardExperimentRequest): - request = tensorboard_service.DeleteTensorboardExperimentRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard_experiment] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def create_tensorboard_run(self, - request: Union[tensorboard_service.CreateTensorboardRunRequest, dict] = None, - *, - parent: str = None, - tensorboard_run: gca_tensorboard_run.TensorboardRun = None, - tensorboard_run_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_run.TensorboardRun: - r"""Creates a TensorboardRun. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateTensorboardRunRequest, dict]): - The request object. Request message for - [TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardRun]. - parent (str): - Required. The resource name of the TensorboardExperiment - to create the TensorboardRun in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_run (google.cloud.aiplatform_v1.types.TensorboardRun): - Required. The TensorboardRun to - create. - - This corresponds to the ``tensorboard_run`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_run_id (str): - Required. The ID to use for the Tensorboard run, which - will become the final component of the Tensorboard run's - resource name. - - This value should be 1-128 characters, and valid - characters are /[a-z][0-9]-/. 
- - This corresponds to the ``tensorboard_run_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.TensorboardRun: - TensorboardRun maps to a specific - execution of a training job with a given - set of hyperparameter values, model - definition, dataset, etc - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, tensorboard_run, tensorboard_run_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.CreateTensorboardRunRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.CreateTensorboardRunRequest): - request = tensorboard_service.CreateTensorboardRunRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if tensorboard_run is not None: - request.tensorboard_run = tensorboard_run - if tensorboard_run_id is not None: - request.tensorboard_run_id = tensorboard_run_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_tensorboard_run] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def batch_create_tensorboard_runs(self, - request: Union[tensorboard_service.BatchCreateTensorboardRunsRequest, dict] = None, - *, - parent: str = None, - requests: Sequence[tensorboard_service.CreateTensorboardRunRequest] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_service.BatchCreateTensorboardRunsResponse: - r"""Batch create TensorboardRuns. - - Args: - request (Union[google.cloud.aiplatform_v1.types.BatchCreateTensorboardRunsRequest, dict]): - The request object. Request message for - [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardRuns]. - parent (str): - Required. The resource name of the TensorboardExperiment - to create the TensorboardRuns in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - The parent field in the CreateTensorboardRunRequest - messages must match this field. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - requests (Sequence[google.cloud.aiplatform_v1.types.CreateTensorboardRunRequest]): - Required. The request message - specifying the TensorboardRuns to - create. A maximum of 1000 - TensorboardRuns can be created in a - batch. - - This corresponds to the ``requests`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.BatchCreateTensorboardRunsResponse: - Response message for - [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardRuns]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, requests]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.BatchCreateTensorboardRunsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.BatchCreateTensorboardRunsRequest): - request = tensorboard_service.BatchCreateTensorboardRunsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if requests is not None: - request.requests = requests - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.batch_create_tensorboard_runs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def get_tensorboard_run(self, - request: Union[tensorboard_service.GetTensorboardRunRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_run.TensorboardRun: - r"""Gets a TensorboardRun. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetTensorboardRunRequest, dict]): - The request object. Request message for - [TensorboardService.GetTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.GetTensorboardRun]. - name (str): - Required. The name of the TensorboardRun resource. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.TensorboardRun: - TensorboardRun maps to a specific - execution of a training job with a given - set of hyperparameter values, model - definition, dataset, etc - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.GetTensorboardRunRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, tensorboard_service.GetTensorboardRunRequest): - request = tensorboard_service.GetTensorboardRunRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_tensorboard_run] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_tensorboard_run(self, - request: Union[tensorboard_service.UpdateTensorboardRunRequest, dict] = None, - *, - tensorboard_run: gca_tensorboard_run.TensorboardRun = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_run.TensorboardRun: - r"""Updates a TensorboardRun. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardRunRequest, dict]): - The request object. Request message for - [TensorboardService.UpdateTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardRun]. - tensorboard_run (google.cloud.aiplatform_v1.types.TensorboardRun): - Required. The TensorboardRun's ``name`` field is used to - identify the TensorboardRun to be updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``tensorboard_run`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. 
Field mask is used to specify the fields to be - overwritten in the TensorboardRun resource by the - update. The fields specified in the update_mask are - relative to the resource, not the full request. A field - will be overwritten if it is in the mask. If the user - does not provide a mask then all fields will be - overwritten if new values are specified. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.TensorboardRun: - TensorboardRun maps to a specific - execution of a training job with a given - set of hyperparameter values, model - definition, dataset, etc - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_run, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.UpdateTensorboardRunRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.UpdateTensorboardRunRequest): - request = tensorboard_service.UpdateTensorboardRunRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if tensorboard_run is not None: - request.tensorboard_run = tensorboard_run - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_tensorboard_run] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_run.name", request.tensorboard_run.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_tensorboard_runs(self, - request: Union[tensorboard_service.ListTensorboardRunsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTensorboardRunsPager: - r"""Lists TensorboardRuns in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListTensorboardRunsRequest, dict]): - The request object. Request message for - [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardRuns]. - parent (str): - Required. The resource name of the - TensorboardExperiment to list - TensorboardRuns. Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}' - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardRunsPager: - Response message for - [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardRuns]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.ListTensorboardRunsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.ListTensorboardRunsRequest): - request = tensorboard_service.ListTensorboardRunsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_tensorboard_runs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. 
- response = pagers.ListTensorboardRunsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_tensorboard_run(self, - request: Union[tensorboard_service.DeleteTensorboardRunRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a TensorboardRun. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardRunRequest, dict]): - The request object. Request message for - [TensorboardService.DeleteTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardRun]. - name (str): - Required. The name of the TensorboardRun to be deleted. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.DeleteTensorboardRunRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.DeleteTensorboardRunRequest): - request = tensorboard_service.DeleteTensorboardRunRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard_run] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
- return response - - def batch_create_tensorboard_time_series(self, - request: Union[tensorboard_service.BatchCreateTensorboardTimeSeriesRequest, dict] = None, - *, - parent: str = None, - requests: Sequence[tensorboard_service.CreateTensorboardTimeSeriesRequest] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_service.BatchCreateTensorboardTimeSeriesResponse: - r"""Batch create TensorboardTimeSeries that belong to a - TensorboardExperiment. - - Args: - request (Union[google.cloud.aiplatform_v1.types.BatchCreateTensorboardTimeSeriesRequest, dict]): - The request object. Request message for - [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardTimeSeries]. - parent (str): - Required. The resource name of the TensorboardExperiment - to create the TensorboardTimeSeries in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - The TensorboardRuns referenced by the parent fields in - the CreateTensorboardTimeSeriesRequest messages must be - sub resources of this TensorboardExperiment. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - requests (Sequence[google.cloud.aiplatform_v1.types.CreateTensorboardTimeSeriesRequest]): - Required. The request message - specifying the TensorboardTimeSeries to - create. A maximum of 1000 - TensorboardTimeSeries can be created in - a batch. - - This corresponds to the ``requests`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1.types.BatchCreateTensorboardTimeSeriesResponse: - Response message for - [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardTimeSeries]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, requests]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.BatchCreateTensorboardTimeSeriesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.BatchCreateTensorboardTimeSeriesRequest): - request = tensorboard_service.BatchCreateTensorboardTimeSeriesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if requests is not None: - request.requests = requests - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.batch_create_tensorboard_time_series] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def create_tensorboard_time_series(self, - request: Union[tensorboard_service.CreateTensorboardTimeSeriesRequest, dict] = None, - *, - parent: str = None, - tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_time_series.TensorboardTimeSeries: - r"""Creates a TensorboardTimeSeries. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateTensorboardTimeSeriesRequest, dict]): - The request object. Request message for - [TensorboardService.CreateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardTimeSeries]. - parent (str): - Required. The resource name of the TensorboardRun to - create the TensorboardTimeSeries in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_time_series (google.cloud.aiplatform_v1.types.TensorboardTimeSeries): - Required. The TensorboardTimeSeries - to create. - - This corresponds to the ``tensorboard_time_series`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.TensorboardTimeSeries: - TensorboardTimeSeries maps to times - series produced in training runs - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, tensorboard_time_series]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.CreateTensorboardTimeSeriesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.CreateTensorboardTimeSeriesRequest): - request = tensorboard_service.CreateTensorboardTimeSeriesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if tensorboard_time_series is not None: - request.tensorboard_time_series = tensorboard_time_series - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_tensorboard_time_series] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_tensorboard_time_series(self, - request: Union[tensorboard_service.GetTensorboardTimeSeriesRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_time_series.TensorboardTimeSeries: - r"""Gets a TensorboardTimeSeries. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetTensorboardTimeSeriesRequest, dict]): - The request object. 
Request message for - [TensorboardService.GetTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.GetTensorboardTimeSeries]. - name (str): - Required. The name of the TensorboardTimeSeries - resource. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.TensorboardTimeSeries: - TensorboardTimeSeries maps to times - series produced in training runs - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.GetTensorboardTimeSeriesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.GetTensorboardTimeSeriesRequest): - request = tensorboard_service.GetTensorboardTimeSeriesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.get_tensorboard_time_series] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_tensorboard_time_series(self, - request: Union[tensorboard_service.UpdateTensorboardTimeSeriesRequest, dict] = None, - *, - tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_time_series.TensorboardTimeSeries: - r"""Updates a TensorboardTimeSeries. - - Args: - request (Union[google.cloud.aiplatform_v1.types.UpdateTensorboardTimeSeriesRequest, dict]): - The request object. Request message for - [TensorboardService.UpdateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardTimeSeries]. - tensorboard_time_series (google.cloud.aiplatform_v1.types.TensorboardTimeSeries): - Required. The TensorboardTimeSeries' ``name`` field is - used to identify the TensorboardTimeSeries to be - updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``tensorboard_time_series`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. Field mask is used to specify the fields to be - overwritten in the TensorboardTimeSeries resource by the - update. The fields specified in the update_mask are - relative to the resource, not the full request. 
A field - will be overwritten if it is in the mask. If the user - does not provide a mask then all fields will be - overwritten if new values are specified. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.TensorboardTimeSeries: - TensorboardTimeSeries maps to times - series produced in training runs - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_time_series, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.UpdateTensorboardTimeSeriesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.UpdateTensorboardTimeSeriesRequest): - request = tensorboard_service.UpdateTensorboardTimeSeriesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_time_series is not None: - request.tensorboard_time_series = tensorboard_time_series - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.update_tensorboard_time_series] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_time_series.name", request.tensorboard_time_series.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_tensorboard_time_series(self, - request: Union[tensorboard_service.ListTensorboardTimeSeriesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTensorboardTimeSeriesPager: - r"""Lists TensorboardTimeSeries in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListTensorboardTimeSeriesRequest, dict]): - The request object. Request message for - [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardTimeSeries]. - parent (str): - Required. The resource name of the - TensorboardRun to list - TensorboardTimeSeries. Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}' - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ListTensorboardTimeSeriesPager: - Response message for - [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardTimeSeries]. 
- - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.ListTensorboardTimeSeriesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.ListTensorboardTimeSeriesRequest): - request = tensorboard_service.ListTensorboardTimeSeriesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_tensorboard_time_series] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListTensorboardTimeSeriesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def delete_tensorboard_time_series(self, - request: Union[tensorboard_service.DeleteTensorboardTimeSeriesRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a TensorboardTimeSeries. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteTensorboardTimeSeriesRequest, dict]): - The request object. Request message for - [TensorboardService.DeleteTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardTimeSeries]. - name (str): - Required. The name of the TensorboardTimeSeries to be - deleted. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.DeleteTensorboardTimeSeriesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.DeleteTensorboardTimeSeriesRequest): - request = tensorboard_service.DeleteTensorboardTimeSeriesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard_time_series] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def batch_read_tensorboard_time_series_data(self, - request: Union[tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest, dict] = None, - *, - tensorboard: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse: - r"""Reads multiple TensorboardTimeSeries' data. 
The data - point number limit is 1000 for scalars, 100 for tensors - and blob references. If the number of data points stored - is less than the limit, all data will be returned. - Otherwise, that limit number of data points will be - randomly selected from this time series and returned. - - Args: - request (Union[google.cloud.aiplatform_v1.types.BatchReadTensorboardTimeSeriesDataRequest, dict]): - The request object. Request message for - [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.BatchReadTensorboardTimeSeriesData]. - tensorboard (str): - Required. The resource name of the Tensorboard - containing TensorboardTimeSeries to read data from. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}``. - The TensorboardTimeSeries referenced by - [time_series][google.cloud.aiplatform.v1.BatchReadTensorboardTimeSeriesDataRequest.time_series] - must be sub resources of this Tensorboard. - - This corresponds to the ``tensorboard`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.BatchReadTensorboardTimeSeriesDataResponse: - Response message for - [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.BatchReadTensorboardTimeSeriesData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([tensorboard]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest): - request = tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard is not None: - request.tensorboard = tensorboard - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.batch_read_tensorboard_time_series_data] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard", request.tensorboard), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def read_tensorboard_time_series_data(self, - request: Union[tensorboard_service.ReadTensorboardTimeSeriesDataRequest, dict] = None, - *, - tensorboard_time_series: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_service.ReadTensorboardTimeSeriesDataResponse: - r"""Reads a TensorboardTimeSeries' data. By default, if the number - of data points stored is less than 1000, all data will be - returned. Otherwise, 1000 data points will be randomly selected - from this time series and returned. 
This value can be changed by - changing max_data_points, which can't be greater than 10k. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ReadTensorboardTimeSeriesDataRequest, dict]): - The request object. Request message for - [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardTimeSeriesData]. - tensorboard_time_series (str): - Required. The resource name of the TensorboardTimeSeries - to read data from. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``tensorboard_time_series`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.ReadTensorboardTimeSeriesDataResponse: - Response message for - [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardTimeSeriesData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_time_series]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.ReadTensorboardTimeSeriesDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, tensorboard_service.ReadTensorboardTimeSeriesDataRequest): - request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_time_series is not None: - request.tensorboard_time_series = tensorboard_time_series - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.read_tensorboard_time_series_data] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_time_series", request.tensorboard_time_series), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def read_tensorboard_blob_data(self, - request: Union[tensorboard_service.ReadTensorboardBlobDataRequest, dict] = None, - *, - time_series: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> Iterable[tensorboard_service.ReadTensorboardBlobDataResponse]: - r"""Gets bytes of TensorboardBlobs. - This is to allow reading blob data stored in consumer - project's Cloud Storage bucket without users having to - obtain Cloud Storage access permission. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ReadTensorboardBlobDataRequest, dict]): - The request object. Request message for - [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardBlobData]. - time_series (str): - Required. The resource name of the TensorboardTimeSeries - to list Blobs. 
Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}' - - This corresponds to the ``time_series`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - Iterable[google.cloud.aiplatform_v1.types.ReadTensorboardBlobDataResponse]: - Response message for - [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardBlobData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([time_series]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.ReadTensorboardBlobDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.ReadTensorboardBlobDataRequest): - request = tensorboard_service.ReadTensorboardBlobDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if time_series is not None: - request.time_series = time_series - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.read_tensorboard_blob_data] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("time_series", request.time_series), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def write_tensorboard_experiment_data(self, - request: Union[tensorboard_service.WriteTensorboardExperimentDataRequest, dict] = None, - *, - tensorboard_experiment: str = None, - write_run_data_requests: Sequence[tensorboard_service.WriteTensorboardRunDataRequest] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_service.WriteTensorboardExperimentDataResponse: - r"""Write time series data points of multiple - TensorboardTimeSeries in multiple TensorboardRun's. If - any data fail to be ingested, an error will be returned. - - Args: - request (Union[google.cloud.aiplatform_v1.types.WriteTensorboardExperimentDataRequest, dict]): - The request object. Request message for - [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardExperimentData]. - tensorboard_experiment (str): - Required. The resource name of the TensorboardExperiment - to write data to. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - - This corresponds to the ``tensorboard_experiment`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - write_run_data_requests (Sequence[google.cloud.aiplatform_v1.types.WriteTensorboardRunDataRequest]): - Required. Requests containing per-run - TensorboardTimeSeries data to write. - - This corresponds to the ``write_run_data_requests`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.WriteTensorboardExperimentDataResponse: - Response message for - [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardExperimentData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_experiment, write_run_data_requests]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.WriteTensorboardExperimentDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.WriteTensorboardExperimentDataRequest): - request = tensorboard_service.WriteTensorboardExperimentDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_experiment is not None: - request.tensorboard_experiment = tensorboard_experiment - if write_run_data_requests is not None: - request.write_run_data_requests = write_run_data_requests - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.write_tensorboard_experiment_data] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_experiment", request.tensorboard_experiment), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def write_tensorboard_run_data(self, - request: Union[tensorboard_service.WriteTensorboardRunDataRequest, dict] = None, - *, - tensorboard_run: str = None, - time_series_data: Sequence[tensorboard_data.TimeSeriesData] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_service.WriteTensorboardRunDataResponse: - r"""Write time series data points into multiple - TensorboardTimeSeries under a TensorboardRun. If any - data fail to be ingested, an error will be returned. - - Args: - request (Union[google.cloud.aiplatform_v1.types.WriteTensorboardRunDataRequest, dict]): - The request object. Request message for - [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardRunData]. - tensorboard_run (str): - Required. The resource name of the TensorboardRun to - write data to. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``tensorboard_run`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - time_series_data (Sequence[google.cloud.aiplatform_v1.types.TimeSeriesData]): - Required. The TensorboardTimeSeries - data to write. Values with in a time - series are indexed by their step value. - Repeated writes to the same step will - overwrite the existing value for that - step. - The upper limit of data points per write - request is 5000. - - This corresponds to the ``time_series_data`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.WriteTensorboardRunDataResponse: - Response message for - [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardRunData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_run, time_series_data]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.WriteTensorboardRunDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.WriteTensorboardRunDataRequest): - request = tensorboard_service.WriteTensorboardRunDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_run is not None: - request.tensorboard_run = tensorboard_run - if time_series_data is not None: - request.time_series_data = time_series_data - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.write_tensorboard_run_data] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_run", request.tensorboard_run), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def export_tensorboard_time_series_data(self, - request: Union[tensorboard_service.ExportTensorboardTimeSeriesDataRequest, dict] = None, - *, - tensorboard_time_series: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ExportTensorboardTimeSeriesDataPager: - r"""Exports a TensorboardTimeSeries' data. Data is - returned in paginated responses. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ExportTensorboardTimeSeriesDataRequest, dict]): - The request object. Request message for - [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ExportTensorboardTimeSeriesData]. - tensorboard_time_series (str): - Required. The resource name of the TensorboardTimeSeries - to export data from. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``tensorboard_time_series`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.tensorboard_service.pagers.ExportTensorboardTimeSeriesDataPager: - Response message for - [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ExportTensorboardTimeSeriesData]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([tensorboard_time_series]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.ExportTensorboardTimeSeriesDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.ExportTensorboardTimeSeriesDataRequest): - request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_time_series is not None: - request.tensorboard_time_series = tensorboard_time_series - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.export_tensorboard_time_series_data] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_time_series", request.tensorboard_time_series), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ExportTensorboardTimeSeriesDataPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! 
Exiting the with block will CLOSE the transport - and may cause errors in other clients! - """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "TensorboardServiceClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/pagers.py deleted file mode 100644 index 50bc5aa6b2..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/pagers.py +++ /dev/null @@ -1,633 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.aiplatform_v1.types import tensorboard -from google.cloud.aiplatform_v1.types import tensorboard_data -from google.cloud.aiplatform_v1.types import tensorboard_experiment -from google.cloud.aiplatform_v1.types import tensorboard_run -from google.cloud.aiplatform_v1.types import tensorboard_service -from google.cloud.aiplatform_v1.types import tensorboard_time_series - - -class ListTensorboardsPager: - """A pager for iterating through ``list_tensorboards`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListTensorboardsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``tensorboards`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListTensorboards`` requests and continue to iterate - through the ``tensorboards`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListTensorboardsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., tensorboard_service.ListTensorboardsResponse], - request: tensorboard_service.ListTensorboardsRequest, - response: tensorboard_service.ListTensorboardsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListTensorboardsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListTensorboardsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = tensorboard_service.ListTensorboardsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[tensorboard_service.ListTensorboardsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[tensorboard.Tensorboard]: - for page in self.pages: - yield from page.tensorboards - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTensorboardsAsyncPager: - """A pager for iterating through ``list_tensorboards`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListTensorboardsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``tensorboards`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListTensorboards`` requests and continue to iterate - through the ``tensorboards`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListTensorboardsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[tensorboard_service.ListTensorboardsResponse]], - request: tensorboard_service.ListTensorboardsRequest, - response: tensorboard_service.ListTensorboardsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. 
- request (google.cloud.aiplatform_v1.types.ListTensorboardsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListTensorboardsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = tensorboard_service.ListTensorboardsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[tensorboard_service.ListTensorboardsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[tensorboard.Tensorboard]: - async def async_generator(): - async for page in self.pages: - for response in page.tensorboards: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTensorboardExperimentsPager: - """A pager for iterating through ``list_tensorboard_experiments`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListTensorboardExperimentsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``tensorboard_experiments`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListTensorboardExperiments`` requests and continue to iterate - through the ``tensorboard_experiments`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListTensorboardExperimentsResponse` - attributes are available on the pager. 
If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., tensorboard_service.ListTensorboardExperimentsResponse], - request: tensorboard_service.ListTensorboardExperimentsRequest, - response: tensorboard_service.ListTensorboardExperimentsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListTensorboardExperimentsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListTensorboardExperimentsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = tensorboard_service.ListTensorboardExperimentsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[tensorboard_service.ListTensorboardExperimentsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[tensorboard_experiment.TensorboardExperiment]: - for page in self.pages: - yield from page.tensorboard_experiments - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTensorboardExperimentsAsyncPager: - """A pager for iterating through ``list_tensorboard_experiments`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListTensorboardExperimentsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``tensorboard_experiments`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListTensorboardExperiments`` requests and continue to iterate - through the ``tensorboard_experiments`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListTensorboardExperimentsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[tensorboard_service.ListTensorboardExperimentsResponse]], - request: tensorboard_service.ListTensorboardExperimentsRequest, - response: tensorboard_service.ListTensorboardExperimentsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListTensorboardExperimentsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListTensorboardExperimentsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = tensorboard_service.ListTensorboardExperimentsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[tensorboard_service.ListTensorboardExperimentsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[tensorboard_experiment.TensorboardExperiment]: - async def async_generator(): - async for page in self.pages: - for response in page.tensorboard_experiments: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTensorboardRunsPager: - """A pager for iterating through ``list_tensorboard_runs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListTensorboardRunsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``tensorboard_runs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListTensorboardRuns`` requests and continue to iterate - through the ``tensorboard_runs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListTensorboardRunsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., tensorboard_service.ListTensorboardRunsResponse], - request: tensorboard_service.ListTensorboardRunsRequest, - response: tensorboard_service.ListTensorboardRunsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. 
- - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListTensorboardRunsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListTensorboardRunsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = tensorboard_service.ListTensorboardRunsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[tensorboard_service.ListTensorboardRunsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[tensorboard_run.TensorboardRun]: - for page in self.pages: - yield from page.tensorboard_runs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTensorboardRunsAsyncPager: - """A pager for iterating through ``list_tensorboard_runs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListTensorboardRunsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``tensorboard_runs`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListTensorboardRuns`` requests and continue to iterate - through the ``tensorboard_runs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListTensorboardRunsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., Awaitable[tensorboard_service.ListTensorboardRunsResponse]], - request: tensorboard_service.ListTensorboardRunsRequest, - response: tensorboard_service.ListTensorboardRunsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListTensorboardRunsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListTensorboardRunsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = tensorboard_service.ListTensorboardRunsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[tensorboard_service.ListTensorboardRunsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[tensorboard_run.TensorboardRun]: - async def async_generator(): - async for page in self.pages: - for response in page.tensorboard_runs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTensorboardTimeSeriesPager: - """A pager for iterating through ``list_tensorboard_time_series`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListTensorboardTimeSeriesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``tensorboard_time_series`` field. 
- - If there are more pages, the ``__iter__`` method will make additional - ``ListTensorboardTimeSeries`` requests and continue to iterate - through the ``tensorboard_time_series`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListTensorboardTimeSeriesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., tensorboard_service.ListTensorboardTimeSeriesResponse], - request: tensorboard_service.ListTensorboardTimeSeriesRequest, - response: tensorboard_service.ListTensorboardTimeSeriesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListTensorboardTimeSeriesRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListTensorboardTimeSeriesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = tensorboard_service.ListTensorboardTimeSeriesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[tensorboard_service.ListTensorboardTimeSeriesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[tensorboard_time_series.TensorboardTimeSeries]: - for page in self.pages: - yield from page.tensorboard_time_series - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTensorboardTimeSeriesAsyncPager: - """A pager for iterating through ``list_tensorboard_time_series`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListTensorboardTimeSeriesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``tensorboard_time_series`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListTensorboardTimeSeries`` requests and continue to iterate - through the ``tensorboard_time_series`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListTensorboardTimeSeriesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[tensorboard_service.ListTensorboardTimeSeriesResponse]], - request: tensorboard_service.ListTensorboardTimeSeriesRequest, - response: tensorboard_service.ListTensorboardTimeSeriesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. 
- - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListTensorboardTimeSeriesRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListTensorboardTimeSeriesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = tensorboard_service.ListTensorboardTimeSeriesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[tensorboard_service.ListTensorboardTimeSeriesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[tensorboard_time_series.TensorboardTimeSeries]: - async def async_generator(): - async for page in self.pages: - for response in page.tensorboard_time_series: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ExportTensorboardTimeSeriesDataPager: - """A pager for iterating through ``export_tensorboard_time_series_data`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ExportTensorboardTimeSeriesDataResponse` object, and - provides an ``__iter__`` method to iterate through its - ``time_series_data_points`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ExportTensorboardTimeSeriesData`` requests and continue to iterate - through the ``time_series_data_points`` field on the - corresponding responses. 
- - All the usual :class:`google.cloud.aiplatform_v1.types.ExportTensorboardTimeSeriesDataResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., tensorboard_service.ExportTensorboardTimeSeriesDataResponse], - request: tensorboard_service.ExportTensorboardTimeSeriesDataRequest, - response: tensorboard_service.ExportTensorboardTimeSeriesDataResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ExportTensorboardTimeSeriesDataRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ExportTensorboardTimeSeriesDataResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[tensorboard_data.TimeSeriesDataPoint]: - for page in self.pages: - yield from page.time_series_data_points - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ExportTensorboardTimeSeriesDataAsyncPager: - """A pager for iterating through ``export_tensorboard_time_series_data`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ExportTensorboardTimeSeriesDataResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``time_series_data_points`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ExportTensorboardTimeSeriesData`` requests and continue to iterate - through the ``time_series_data_points`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ExportTensorboardTimeSeriesDataResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]], - request: tensorboard_service.ExportTensorboardTimeSeriesDataRequest, - response: tensorboard_service.ExportTensorboardTimeSeriesDataResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ExportTensorboardTimeSeriesDataRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ExportTensorboardTimeSeriesDataResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[tensorboard_data.TimeSeriesDataPoint]: - async def async_generator(): - async for page in self.pages: - for response in page.time_series_data_points: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/__init__.py deleted file mode 100644 index 9565b55932..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -from typing import Dict, Type - -from .base import TensorboardServiceTransport -from .grpc import TensorboardServiceGrpcTransport -from .grpc_asyncio import TensorboardServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[TensorboardServiceTransport]] -_transport_registry['grpc'] = TensorboardServiceGrpcTransport -_transport_registry['grpc_asyncio'] = TensorboardServiceGrpcAsyncIOTransport - -__all__ = ( - 'TensorboardServiceTransport', - 'TensorboardServiceGrpcTransport', - 'TensorboardServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/base.py deleted file mode 100644 index 8d8c5272c8..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/base.py +++ /dev/null @@ -1,539 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1.types import tensorboard -from google.cloud.aiplatform_v1.types import tensorboard_experiment -from google.cloud.aiplatform_v1.types import tensorboard_experiment as gca_tensorboard_experiment -from google.cloud.aiplatform_v1.types import tensorboard_run -from google.cloud.aiplatform_v1.types import tensorboard_run as gca_tensorboard_run -from google.cloud.aiplatform_v1.types import tensorboard_service -from google.cloud.aiplatform_v1.types import tensorboard_time_series -from google.cloud.aiplatform_v1.types import tensorboard_time_series as gca_tensorboard_time_series -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class TensorboardServiceTransport(abc.ABC): - """Abstract transport class for TensorboardService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - 'https://www.googleapis.com/auth/cloud-platform.read-only', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = 
DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. 
- if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.create_tensorboard: gapic_v1.method.wrap_method( - self.create_tensorboard, - default_timeout=None, - client_info=client_info, - ), - self.get_tensorboard: gapic_v1.method.wrap_method( - self.get_tensorboard, - default_timeout=None, - client_info=client_info, - ), - self.update_tensorboard: gapic_v1.method.wrap_method( - self.update_tensorboard, - default_timeout=None, - client_info=client_info, - ), - self.list_tensorboards: gapic_v1.method.wrap_method( - self.list_tensorboards, - default_timeout=None, - client_info=client_info, - ), - self.delete_tensorboard: gapic_v1.method.wrap_method( - self.delete_tensorboard, - default_timeout=None, - client_info=client_info, - ), - self.create_tensorboard_experiment: gapic_v1.method.wrap_method( - self.create_tensorboard_experiment, - default_timeout=None, - client_info=client_info, - ), - self.get_tensorboard_experiment: gapic_v1.method.wrap_method( - self.get_tensorboard_experiment, - default_timeout=None, - client_info=client_info, - ), - self.update_tensorboard_experiment: 
gapic_v1.method.wrap_method( - self.update_tensorboard_experiment, - default_timeout=None, - client_info=client_info, - ), - self.list_tensorboard_experiments: gapic_v1.method.wrap_method( - self.list_tensorboard_experiments, - default_timeout=None, - client_info=client_info, - ), - self.delete_tensorboard_experiment: gapic_v1.method.wrap_method( - self.delete_tensorboard_experiment, - default_timeout=None, - client_info=client_info, - ), - self.create_tensorboard_run: gapic_v1.method.wrap_method( - self.create_tensorboard_run, - default_timeout=None, - client_info=client_info, - ), - self.batch_create_tensorboard_runs: gapic_v1.method.wrap_method( - self.batch_create_tensorboard_runs, - default_timeout=None, - client_info=client_info, - ), - self.get_tensorboard_run: gapic_v1.method.wrap_method( - self.get_tensorboard_run, - default_timeout=None, - client_info=client_info, - ), - self.update_tensorboard_run: gapic_v1.method.wrap_method( - self.update_tensorboard_run, - default_timeout=None, - client_info=client_info, - ), - self.list_tensorboard_runs: gapic_v1.method.wrap_method( - self.list_tensorboard_runs, - default_timeout=None, - client_info=client_info, - ), - self.delete_tensorboard_run: gapic_v1.method.wrap_method( - self.delete_tensorboard_run, - default_timeout=None, - client_info=client_info, - ), - self.batch_create_tensorboard_time_series: gapic_v1.method.wrap_method( - self.batch_create_tensorboard_time_series, - default_timeout=None, - client_info=client_info, - ), - self.create_tensorboard_time_series: gapic_v1.method.wrap_method( - self.create_tensorboard_time_series, - default_timeout=None, - client_info=client_info, - ), - self.get_tensorboard_time_series: gapic_v1.method.wrap_method( - self.get_tensorboard_time_series, - default_timeout=None, - client_info=client_info, - ), - self.update_tensorboard_time_series: gapic_v1.method.wrap_method( - self.update_tensorboard_time_series, - default_timeout=None, - client_info=client_info, - ), - 
self.list_tensorboard_time_series: gapic_v1.method.wrap_method( - self.list_tensorboard_time_series, - default_timeout=None, - client_info=client_info, - ), - self.delete_tensorboard_time_series: gapic_v1.method.wrap_method( - self.delete_tensorboard_time_series, - default_timeout=None, - client_info=client_info, - ), - self.batch_read_tensorboard_time_series_data: gapic_v1.method.wrap_method( - self.batch_read_tensorboard_time_series_data, - default_timeout=None, - client_info=client_info, - ), - self.read_tensorboard_time_series_data: gapic_v1.method.wrap_method( - self.read_tensorboard_time_series_data, - default_timeout=None, - client_info=client_info, - ), - self.read_tensorboard_blob_data: gapic_v1.method.wrap_method( - self.read_tensorboard_blob_data, - default_timeout=None, - client_info=client_info, - ), - self.write_tensorboard_experiment_data: gapic_v1.method.wrap_method( - self.write_tensorboard_experiment_data, - default_timeout=None, - client_info=client_info, - ), - self.write_tensorboard_run_data: gapic_v1.method.wrap_method( - self.write_tensorboard_run_data, - default_timeout=None, - client_info=client_info, - ), - self.export_tensorboard_time_series_data: gapic_v1.method.wrap_method( - self.export_tensorboard_time_series_data, - default_timeout=None, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! 
- """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_tensorboard(self) -> Callable[ - [tensorboard_service.CreateTensorboardRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_tensorboard(self) -> Callable[ - [tensorboard_service.GetTensorboardRequest], - Union[ - tensorboard.Tensorboard, - Awaitable[tensorboard.Tensorboard] - ]]: - raise NotImplementedError() - - @property - def update_tensorboard(self) -> Callable[ - [tensorboard_service.UpdateTensorboardRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def list_tensorboards(self) -> Callable[ - [tensorboard_service.ListTensorboardsRequest], - Union[ - tensorboard_service.ListTensorboardsResponse, - Awaitable[tensorboard_service.ListTensorboardsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_tensorboard(self) -> Callable[ - [tensorboard_service.DeleteTensorboardRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def create_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.CreateTensorboardExperimentRequest], - Union[ - gca_tensorboard_experiment.TensorboardExperiment, - Awaitable[gca_tensorboard_experiment.TensorboardExperiment] - ]]: - raise NotImplementedError() - - @property - def get_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.GetTensorboardExperimentRequest], - Union[ - tensorboard_experiment.TensorboardExperiment, - Awaitable[tensorboard_experiment.TensorboardExperiment] - ]]: - raise NotImplementedError() - - @property - def update_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.UpdateTensorboardExperimentRequest], - 
Union[ - gca_tensorboard_experiment.TensorboardExperiment, - Awaitable[gca_tensorboard_experiment.TensorboardExperiment] - ]]: - raise NotImplementedError() - - @property - def list_tensorboard_experiments(self) -> Callable[ - [tensorboard_service.ListTensorboardExperimentsRequest], - Union[ - tensorboard_service.ListTensorboardExperimentsResponse, - Awaitable[tensorboard_service.ListTensorboardExperimentsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.DeleteTensorboardExperimentRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def create_tensorboard_run(self) -> Callable[ - [tensorboard_service.CreateTensorboardRunRequest], - Union[ - gca_tensorboard_run.TensorboardRun, - Awaitable[gca_tensorboard_run.TensorboardRun] - ]]: - raise NotImplementedError() - - @property - def batch_create_tensorboard_runs(self) -> Callable[ - [tensorboard_service.BatchCreateTensorboardRunsRequest], - Union[ - tensorboard_service.BatchCreateTensorboardRunsResponse, - Awaitable[tensorboard_service.BatchCreateTensorboardRunsResponse] - ]]: - raise NotImplementedError() - - @property - def get_tensorboard_run(self) -> Callable[ - [tensorboard_service.GetTensorboardRunRequest], - Union[ - tensorboard_run.TensorboardRun, - Awaitable[tensorboard_run.TensorboardRun] - ]]: - raise NotImplementedError() - - @property - def update_tensorboard_run(self) -> Callable[ - [tensorboard_service.UpdateTensorboardRunRequest], - Union[ - gca_tensorboard_run.TensorboardRun, - Awaitable[gca_tensorboard_run.TensorboardRun] - ]]: - raise NotImplementedError() - - @property - def list_tensorboard_runs(self) -> Callable[ - [tensorboard_service.ListTensorboardRunsRequest], - Union[ - tensorboard_service.ListTensorboardRunsResponse, - Awaitable[tensorboard_service.ListTensorboardRunsResponse] - ]]: - raise NotImplementedError() - - 
@property - def delete_tensorboard_run(self) -> Callable[ - [tensorboard_service.DeleteTensorboardRunRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def batch_create_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.BatchCreateTensorboardTimeSeriesRequest], - Union[ - tensorboard_service.BatchCreateTensorboardTimeSeriesResponse, - Awaitable[tensorboard_service.BatchCreateTensorboardTimeSeriesResponse] - ]]: - raise NotImplementedError() - - @property - def create_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.CreateTensorboardTimeSeriesRequest], - Union[ - gca_tensorboard_time_series.TensorboardTimeSeries, - Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries] - ]]: - raise NotImplementedError() - - @property - def get_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.GetTensorboardTimeSeriesRequest], - Union[ - tensorboard_time_series.TensorboardTimeSeries, - Awaitable[tensorboard_time_series.TensorboardTimeSeries] - ]]: - raise NotImplementedError() - - @property - def update_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.UpdateTensorboardTimeSeriesRequest], - Union[ - gca_tensorboard_time_series.TensorboardTimeSeries, - Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries] - ]]: - raise NotImplementedError() - - @property - def list_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.ListTensorboardTimeSeriesRequest], - Union[ - tensorboard_service.ListTensorboardTimeSeriesResponse, - Awaitable[tensorboard_service.ListTensorboardTimeSeriesResponse] - ]]: - raise NotImplementedError() - - @property - def delete_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.DeleteTensorboardTimeSeriesRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def 
batch_read_tensorboard_time_series_data(self) -> Callable[ - [tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest], - Union[ - tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse, - Awaitable[tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse] - ]]: - raise NotImplementedError() - - @property - def read_tensorboard_time_series_data(self) -> Callable[ - [tensorboard_service.ReadTensorboardTimeSeriesDataRequest], - Union[ - tensorboard_service.ReadTensorboardTimeSeriesDataResponse, - Awaitable[tensorboard_service.ReadTensorboardTimeSeriesDataResponse] - ]]: - raise NotImplementedError() - - @property - def read_tensorboard_blob_data(self) -> Callable[ - [tensorboard_service.ReadTensorboardBlobDataRequest], - Union[ - tensorboard_service.ReadTensorboardBlobDataResponse, - Awaitable[tensorboard_service.ReadTensorboardBlobDataResponse] - ]]: - raise NotImplementedError() - - @property - def write_tensorboard_experiment_data(self) -> Callable[ - [tensorboard_service.WriteTensorboardExperimentDataRequest], - Union[ - tensorboard_service.WriteTensorboardExperimentDataResponse, - Awaitable[tensorboard_service.WriteTensorboardExperimentDataResponse] - ]]: - raise NotImplementedError() - - @property - def write_tensorboard_run_data(self) -> Callable[ - [tensorboard_service.WriteTensorboardRunDataRequest], - Union[ - tensorboard_service.WriteTensorboardRunDataResponse, - Awaitable[tensorboard_service.WriteTensorboardRunDataResponse] - ]]: - raise NotImplementedError() - - @property - def export_tensorboard_time_series_data(self) -> Callable[ - [tensorboard_service.ExportTensorboardTimeSeriesDataRequest], - Union[ - tensorboard_service.ExportTensorboardTimeSeriesDataResponse, - Awaitable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'TensorboardServiceTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc.py 
b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc.py deleted file mode 100644 index f53bc138be..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc.py +++ /dev/null @@ -1,1005 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1.types import tensorboard -from google.cloud.aiplatform_v1.types import tensorboard_experiment -from google.cloud.aiplatform_v1.types import tensorboard_experiment as gca_tensorboard_experiment -from google.cloud.aiplatform_v1.types import tensorboard_run -from google.cloud.aiplatform_v1.types import tensorboard_run as gca_tensorboard_run -from google.cloud.aiplatform_v1.types import tensorboard_service -from google.cloud.aiplatform_v1.types import tensorboard_time_series -from google.cloud.aiplatform_v1.types import tensorboard_time_series as gca_tensorboard_time_series -from google.longrunning import operations_pb2 # type: ignore -from .base 
import TensorboardServiceTransport, DEFAULT_CLIENT_INFO - - -class TensorboardServiceGrpcTransport(TensorboardServiceTransport): - """gRPC backend transport for TensorboardService. - - TensorboardService - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. 
- api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. 
This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. 
- """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_tensorboard(self) -> Callable[ - [tensorboard_service.CreateTensorboardRequest], - operations_pb2.Operation]: - r"""Return a callable for the create tensorboard method over gRPC. - - Creates a Tensorboard. - - Returns: - Callable[[~.CreateTensorboardRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_tensorboard' not in self._stubs: - self._stubs['create_tensorboard'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/CreateTensorboard', - request_serializer=tensorboard_service.CreateTensorboardRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_tensorboard'] - - @property - def get_tensorboard(self) -> Callable[ - [tensorboard_service.GetTensorboardRequest], - tensorboard.Tensorboard]: - r"""Return a callable for the get tensorboard method over gRPC. - - Gets a Tensorboard. - - Returns: - Callable[[~.GetTensorboardRequest], - ~.Tensorboard]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_tensorboard' not in self._stubs: - self._stubs['get_tensorboard'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/GetTensorboard', - request_serializer=tensorboard_service.GetTensorboardRequest.serialize, - response_deserializer=tensorboard.Tensorboard.deserialize, - ) - return self._stubs['get_tensorboard'] - - @property - def update_tensorboard(self) -> Callable[ - [tensorboard_service.UpdateTensorboardRequest], - operations_pb2.Operation]: - r"""Return a callable for the update tensorboard method over gRPC. - - Updates a Tensorboard. - - Returns: - Callable[[~.UpdateTensorboardRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_tensorboard' not in self._stubs: - self._stubs['update_tensorboard'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/UpdateTensorboard', - request_serializer=tensorboard_service.UpdateTensorboardRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_tensorboard'] - - @property - def list_tensorboards(self) -> Callable[ - [tensorboard_service.ListTensorboardsRequest], - tensorboard_service.ListTensorboardsResponse]: - r"""Return a callable for the list tensorboards method over gRPC. - - Lists Tensorboards in a Location. - - Returns: - Callable[[~.ListTensorboardsRequest], - ~.ListTensorboardsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_tensorboards' not in self._stubs: - self._stubs['list_tensorboards'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/ListTensorboards', - request_serializer=tensorboard_service.ListTensorboardsRequest.serialize, - response_deserializer=tensorboard_service.ListTensorboardsResponse.deserialize, - ) - return self._stubs['list_tensorboards'] - - @property - def delete_tensorboard(self) -> Callable[ - [tensorboard_service.DeleteTensorboardRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete tensorboard method over gRPC. - - Deletes a Tensorboard. - - Returns: - Callable[[~.DeleteTensorboardRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_tensorboard' not in self._stubs: - self._stubs['delete_tensorboard'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/DeleteTensorboard', - request_serializer=tensorboard_service.DeleteTensorboardRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_tensorboard'] - - @property - def create_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.CreateTensorboardExperimentRequest], - gca_tensorboard_experiment.TensorboardExperiment]: - r"""Return a callable for the create tensorboard experiment method over gRPC. - - Creates a TensorboardExperiment. - - Returns: - Callable[[~.CreateTensorboardExperimentRequest], - ~.TensorboardExperiment]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_tensorboard_experiment' not in self._stubs: - self._stubs['create_tensorboard_experiment'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/CreateTensorboardExperiment', - request_serializer=tensorboard_service.CreateTensorboardExperimentRequest.serialize, - response_deserializer=gca_tensorboard_experiment.TensorboardExperiment.deserialize, - ) - return self._stubs['create_tensorboard_experiment'] - - @property - def get_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.GetTensorboardExperimentRequest], - tensorboard_experiment.TensorboardExperiment]: - r"""Return a callable for the get tensorboard experiment method over gRPC. - - Gets a TensorboardExperiment. - - Returns: - Callable[[~.GetTensorboardExperimentRequest], - ~.TensorboardExperiment]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_tensorboard_experiment' not in self._stubs: - self._stubs['get_tensorboard_experiment'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/GetTensorboardExperiment', - request_serializer=tensorboard_service.GetTensorboardExperimentRequest.serialize, - response_deserializer=tensorboard_experiment.TensorboardExperiment.deserialize, - ) - return self._stubs['get_tensorboard_experiment'] - - @property - def update_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.UpdateTensorboardExperimentRequest], - gca_tensorboard_experiment.TensorboardExperiment]: - r"""Return a callable for the update tensorboard experiment method over gRPC. - - Updates a TensorboardExperiment. 
- - Returns: - Callable[[~.UpdateTensorboardExperimentRequest], - ~.TensorboardExperiment]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_tensorboard_experiment' not in self._stubs: - self._stubs['update_tensorboard_experiment'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/UpdateTensorboardExperiment', - request_serializer=tensorboard_service.UpdateTensorboardExperimentRequest.serialize, - response_deserializer=gca_tensorboard_experiment.TensorboardExperiment.deserialize, - ) - return self._stubs['update_tensorboard_experiment'] - - @property - def list_tensorboard_experiments(self) -> Callable[ - [tensorboard_service.ListTensorboardExperimentsRequest], - tensorboard_service.ListTensorboardExperimentsResponse]: - r"""Return a callable for the list tensorboard experiments method over gRPC. - - Lists TensorboardExperiments in a Location. - - Returns: - Callable[[~.ListTensorboardExperimentsRequest], - ~.ListTensorboardExperimentsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_tensorboard_experiments' not in self._stubs: - self._stubs['list_tensorboard_experiments'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/ListTensorboardExperiments', - request_serializer=tensorboard_service.ListTensorboardExperimentsRequest.serialize, - response_deserializer=tensorboard_service.ListTensorboardExperimentsResponse.deserialize, - ) - return self._stubs['list_tensorboard_experiments'] - - @property - def delete_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.DeleteTensorboardExperimentRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete tensorboard experiment method over gRPC. - - Deletes a TensorboardExperiment. - - Returns: - Callable[[~.DeleteTensorboardExperimentRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_tensorboard_experiment' not in self._stubs: - self._stubs['delete_tensorboard_experiment'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/DeleteTensorboardExperiment', - request_serializer=tensorboard_service.DeleteTensorboardExperimentRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_tensorboard_experiment'] - - @property - def create_tensorboard_run(self) -> Callable[ - [tensorboard_service.CreateTensorboardRunRequest], - gca_tensorboard_run.TensorboardRun]: - r"""Return a callable for the create tensorboard run method over gRPC. - - Creates a TensorboardRun. - - Returns: - Callable[[~.CreateTensorboardRunRequest], - ~.TensorboardRun]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_tensorboard_run' not in self._stubs: - self._stubs['create_tensorboard_run'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/CreateTensorboardRun', - request_serializer=tensorboard_service.CreateTensorboardRunRequest.serialize, - response_deserializer=gca_tensorboard_run.TensorboardRun.deserialize, - ) - return self._stubs['create_tensorboard_run'] - - @property - def batch_create_tensorboard_runs(self) -> Callable[ - [tensorboard_service.BatchCreateTensorboardRunsRequest], - tensorboard_service.BatchCreateTensorboardRunsResponse]: - r"""Return a callable for the batch create tensorboard runs method over gRPC. - - Batch create TensorboardRuns. - - Returns: - Callable[[~.BatchCreateTensorboardRunsRequest], - ~.BatchCreateTensorboardRunsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_create_tensorboard_runs' not in self._stubs: - self._stubs['batch_create_tensorboard_runs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/BatchCreateTensorboardRuns', - request_serializer=tensorboard_service.BatchCreateTensorboardRunsRequest.serialize, - response_deserializer=tensorboard_service.BatchCreateTensorboardRunsResponse.deserialize, - ) - return self._stubs['batch_create_tensorboard_runs'] - - @property - def get_tensorboard_run(self) -> Callable[ - [tensorboard_service.GetTensorboardRunRequest], - tensorboard_run.TensorboardRun]: - r"""Return a callable for the get tensorboard run method over gRPC. - - Gets a TensorboardRun. 
- - Returns: - Callable[[~.GetTensorboardRunRequest], - ~.TensorboardRun]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_tensorboard_run' not in self._stubs: - self._stubs['get_tensorboard_run'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/GetTensorboardRun', - request_serializer=tensorboard_service.GetTensorboardRunRequest.serialize, - response_deserializer=tensorboard_run.TensorboardRun.deserialize, - ) - return self._stubs['get_tensorboard_run'] - - @property - def update_tensorboard_run(self) -> Callable[ - [tensorboard_service.UpdateTensorboardRunRequest], - gca_tensorboard_run.TensorboardRun]: - r"""Return a callable for the update tensorboard run method over gRPC. - - Updates a TensorboardRun. - - Returns: - Callable[[~.UpdateTensorboardRunRequest], - ~.TensorboardRun]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_tensorboard_run' not in self._stubs: - self._stubs['update_tensorboard_run'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/UpdateTensorboardRun', - request_serializer=tensorboard_service.UpdateTensorboardRunRequest.serialize, - response_deserializer=gca_tensorboard_run.TensorboardRun.deserialize, - ) - return self._stubs['update_tensorboard_run'] - - @property - def list_tensorboard_runs(self) -> Callable[ - [tensorboard_service.ListTensorboardRunsRequest], - tensorboard_service.ListTensorboardRunsResponse]: - r"""Return a callable for the list tensorboard runs method over gRPC. 
- - Lists TensorboardRuns in a Location. - - Returns: - Callable[[~.ListTensorboardRunsRequest], - ~.ListTensorboardRunsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_tensorboard_runs' not in self._stubs: - self._stubs['list_tensorboard_runs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/ListTensorboardRuns', - request_serializer=tensorboard_service.ListTensorboardRunsRequest.serialize, - response_deserializer=tensorboard_service.ListTensorboardRunsResponse.deserialize, - ) - return self._stubs['list_tensorboard_runs'] - - @property - def delete_tensorboard_run(self) -> Callable[ - [tensorboard_service.DeleteTensorboardRunRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete tensorboard run method over gRPC. - - Deletes a TensorboardRun. - - Returns: - Callable[[~.DeleteTensorboardRunRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_tensorboard_run' not in self._stubs: - self._stubs['delete_tensorboard_run'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/DeleteTensorboardRun', - request_serializer=tensorboard_service.DeleteTensorboardRunRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_tensorboard_run'] - - @property - def batch_create_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.BatchCreateTensorboardTimeSeriesRequest], - tensorboard_service.BatchCreateTensorboardTimeSeriesResponse]: - r"""Return a callable for the batch create tensorboard time - series method over gRPC. - - Batch create TensorboardTimeSeries that belong to a - TensorboardExperiment. - - Returns: - Callable[[~.BatchCreateTensorboardTimeSeriesRequest], - ~.BatchCreateTensorboardTimeSeriesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_create_tensorboard_time_series' not in self._stubs: - self._stubs['batch_create_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/BatchCreateTensorboardTimeSeries', - request_serializer=tensorboard_service.BatchCreateTensorboardTimeSeriesRequest.serialize, - response_deserializer=tensorboard_service.BatchCreateTensorboardTimeSeriesResponse.deserialize, - ) - return self._stubs['batch_create_tensorboard_time_series'] - - @property - def create_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.CreateTensorboardTimeSeriesRequest], - gca_tensorboard_time_series.TensorboardTimeSeries]: - r"""Return a callable for the create tensorboard time series method over gRPC. - - Creates a TensorboardTimeSeries. 
- - Returns: - Callable[[~.CreateTensorboardTimeSeriesRequest], - ~.TensorboardTimeSeries]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_tensorboard_time_series' not in self._stubs: - self._stubs['create_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/CreateTensorboardTimeSeries', - request_serializer=tensorboard_service.CreateTensorboardTimeSeriesRequest.serialize, - response_deserializer=gca_tensorboard_time_series.TensorboardTimeSeries.deserialize, - ) - return self._stubs['create_tensorboard_time_series'] - - @property - def get_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.GetTensorboardTimeSeriesRequest], - tensorboard_time_series.TensorboardTimeSeries]: - r"""Return a callable for the get tensorboard time series method over gRPC. - - Gets a TensorboardTimeSeries. - - Returns: - Callable[[~.GetTensorboardTimeSeriesRequest], - ~.TensorboardTimeSeries]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_tensorboard_time_series' not in self._stubs: - self._stubs['get_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/GetTensorboardTimeSeries', - request_serializer=tensorboard_service.GetTensorboardTimeSeriesRequest.serialize, - response_deserializer=tensorboard_time_series.TensorboardTimeSeries.deserialize, - ) - return self._stubs['get_tensorboard_time_series'] - - @property - def update_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.UpdateTensorboardTimeSeriesRequest], - gca_tensorboard_time_series.TensorboardTimeSeries]: - r"""Return a callable for the update tensorboard time series method over gRPC. - - Updates a TensorboardTimeSeries. - - Returns: - Callable[[~.UpdateTensorboardTimeSeriesRequest], - ~.TensorboardTimeSeries]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_tensorboard_time_series' not in self._stubs: - self._stubs['update_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/UpdateTensorboardTimeSeries', - request_serializer=tensorboard_service.UpdateTensorboardTimeSeriesRequest.serialize, - response_deserializer=gca_tensorboard_time_series.TensorboardTimeSeries.deserialize, - ) - return self._stubs['update_tensorboard_time_series'] - - @property - def list_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.ListTensorboardTimeSeriesRequest], - tensorboard_service.ListTensorboardTimeSeriesResponse]: - r"""Return a callable for the list tensorboard time series method over gRPC. - - Lists TensorboardTimeSeries in a Location. 
- - Returns: - Callable[[~.ListTensorboardTimeSeriesRequest], - ~.ListTensorboardTimeSeriesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_tensorboard_time_series' not in self._stubs: - self._stubs['list_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/ListTensorboardTimeSeries', - request_serializer=tensorboard_service.ListTensorboardTimeSeriesRequest.serialize, - response_deserializer=tensorboard_service.ListTensorboardTimeSeriesResponse.deserialize, - ) - return self._stubs['list_tensorboard_time_series'] - - @property - def delete_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.DeleteTensorboardTimeSeriesRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete tensorboard time series method over gRPC. - - Deletes a TensorboardTimeSeries. - - Returns: - Callable[[~.DeleteTensorboardTimeSeriesRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_tensorboard_time_series' not in self._stubs: - self._stubs['delete_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/DeleteTensorboardTimeSeries', - request_serializer=tensorboard_service.DeleteTensorboardTimeSeriesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_tensorboard_time_series'] - - @property - def batch_read_tensorboard_time_series_data(self) -> Callable[ - [tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest], - tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse]: - r"""Return a callable for the batch read tensorboard time - series data method over gRPC. - - Reads multiple TensorboardTimeSeries' data. The data - point number limit is 1000 for scalars, 100 for tensors - and blob references. If the number of data points stored - is less than the limit, all data will be returned. - Otherwise, that limit number of data points will be - randomly selected from this time series and returned. - - Returns: - Callable[[~.BatchReadTensorboardTimeSeriesDataRequest], - ~.BatchReadTensorboardTimeSeriesDataResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'batch_read_tensorboard_time_series_data' not in self._stubs: - self._stubs['batch_read_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/BatchReadTensorboardTimeSeriesData', - request_serializer=tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest.serialize, - response_deserializer=tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse.deserialize, - ) - return self._stubs['batch_read_tensorboard_time_series_data'] - - @property - def read_tensorboard_time_series_data(self) -> Callable[ - [tensorboard_service.ReadTensorboardTimeSeriesDataRequest], - tensorboard_service.ReadTensorboardTimeSeriesDataResponse]: - r"""Return a callable for the read tensorboard time series - data method over gRPC. - - Reads a TensorboardTimeSeries' data. By default, if the number - of data points stored is less than 1000, all data will be - returned. Otherwise, 1000 data points will be randomly selected - from this time series and returned. This value can be changed by - changing max_data_points, which can't be greater than 10k. - - Returns: - Callable[[~.ReadTensorboardTimeSeriesDataRequest], - ~.ReadTensorboardTimeSeriesDataResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'read_tensorboard_time_series_data' not in self._stubs: - self._stubs['read_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/ReadTensorboardTimeSeriesData', - request_serializer=tensorboard_service.ReadTensorboardTimeSeriesDataRequest.serialize, - response_deserializer=tensorboard_service.ReadTensorboardTimeSeriesDataResponse.deserialize, - ) - return self._stubs['read_tensorboard_time_series_data'] - - @property - def read_tensorboard_blob_data(self) -> Callable[ - [tensorboard_service.ReadTensorboardBlobDataRequest], - tensorboard_service.ReadTensorboardBlobDataResponse]: - r"""Return a callable for the read tensorboard blob data method over gRPC. - - Gets bytes of TensorboardBlobs. - This is to allow reading blob data stored in consumer - project's Cloud Storage bucket without users having to - obtain Cloud Storage access permission. - - Returns: - Callable[[~.ReadTensorboardBlobDataRequest], - ~.ReadTensorboardBlobDataResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'read_tensorboard_blob_data' not in self._stubs: - self._stubs['read_tensorboard_blob_data'] = self.grpc_channel.unary_stream( - '/google.cloud.aiplatform.v1.TensorboardService/ReadTensorboardBlobData', - request_serializer=tensorboard_service.ReadTensorboardBlobDataRequest.serialize, - response_deserializer=tensorboard_service.ReadTensorboardBlobDataResponse.deserialize, - ) - return self._stubs['read_tensorboard_blob_data'] - - @property - def write_tensorboard_experiment_data(self) -> Callable[ - [tensorboard_service.WriteTensorboardExperimentDataRequest], - tensorboard_service.WriteTensorboardExperimentDataResponse]: - r"""Return a callable for the write tensorboard experiment - data method over gRPC. - - Write time series data points of multiple - TensorboardTimeSeries in multiple TensorboardRun's. If - any data fail to be ingested, an error will be returned. - - Returns: - Callable[[~.WriteTensorboardExperimentDataRequest], - ~.WriteTensorboardExperimentDataResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'write_tensorboard_experiment_data' not in self._stubs: - self._stubs['write_tensorboard_experiment_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/WriteTensorboardExperimentData', - request_serializer=tensorboard_service.WriteTensorboardExperimentDataRequest.serialize, - response_deserializer=tensorboard_service.WriteTensorboardExperimentDataResponse.deserialize, - ) - return self._stubs['write_tensorboard_experiment_data'] - - @property - def write_tensorboard_run_data(self) -> Callable[ - [tensorboard_service.WriteTensorboardRunDataRequest], - tensorboard_service.WriteTensorboardRunDataResponse]: - r"""Return a callable for the write tensorboard run data method over gRPC. 
- - Write time series data points into multiple - TensorboardTimeSeries under a TensorboardRun. If any - data fail to be ingested, an error will be returned. - - Returns: - Callable[[~.WriteTensorboardRunDataRequest], - ~.WriteTensorboardRunDataResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'write_tensorboard_run_data' not in self._stubs: - self._stubs['write_tensorboard_run_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/WriteTensorboardRunData', - request_serializer=tensorboard_service.WriteTensorboardRunDataRequest.serialize, - response_deserializer=tensorboard_service.WriteTensorboardRunDataResponse.deserialize, - ) - return self._stubs['write_tensorboard_run_data'] - - @property - def export_tensorboard_time_series_data(self) -> Callable[ - [tensorboard_service.ExportTensorboardTimeSeriesDataRequest], - tensorboard_service.ExportTensorboardTimeSeriesDataResponse]: - r"""Return a callable for the export tensorboard time series - data method over gRPC. - - Exports a TensorboardTimeSeries' data. Data is - returned in paginated responses. - - Returns: - Callable[[~.ExportTensorboardTimeSeriesDataRequest], - ~.ExportTensorboardTimeSeriesDataResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'export_tensorboard_time_series_data' not in self._stubs: - self._stubs['export_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/ExportTensorboardTimeSeriesData', - request_serializer=tensorboard_service.ExportTensorboardTimeSeriesDataRequest.serialize, - response_deserializer=tensorboard_service.ExportTensorboardTimeSeriesDataResponse.deserialize, - ) - return self._stubs['export_tensorboard_time_series_data'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'TensorboardServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc_asyncio.py deleted file mode 100644 index d563bb235b..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/tensorboard_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,1009 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1.types import tensorboard -from google.cloud.aiplatform_v1.types import tensorboard_experiment -from google.cloud.aiplatform_v1.types import tensorboard_experiment as gca_tensorboard_experiment -from google.cloud.aiplatform_v1.types import tensorboard_run -from google.cloud.aiplatform_v1.types import tensorboard_run as gca_tensorboard_run -from google.cloud.aiplatform_v1.types import tensorboard_service -from google.cloud.aiplatform_v1.types import tensorboard_time_series -from google.cloud.aiplatform_v1.types import tensorboard_time_series as gca_tensorboard_time_series -from google.longrunning import operations_pb2 # type: ignore -from .base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import TensorboardServiceGrpcTransport - - -class TensorboardServiceGrpcAsyncIOTransport(TensorboardServiceTransport): - """gRPC AsyncIO backend transport for TensorboardService. - - TensorboardService - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. 
- """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
- If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. 
This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_tensorboard(self) -> Callable[ - [tensorboard_service.CreateTensorboardRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create tensorboard method over gRPC. - - Creates a Tensorboard. - - Returns: - Callable[[~.CreateTensorboardRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_tensorboard' not in self._stubs: - self._stubs['create_tensorboard'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/CreateTensorboard', - request_serializer=tensorboard_service.CreateTensorboardRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_tensorboard'] - - @property - def get_tensorboard(self) -> Callable[ - [tensorboard_service.GetTensorboardRequest], - Awaitable[tensorboard.Tensorboard]]: - r"""Return a callable for the get tensorboard method over gRPC. - - Gets a Tensorboard. - - Returns: - Callable[[~.GetTensorboardRequest], - Awaitable[~.Tensorboard]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_tensorboard' not in self._stubs: - self._stubs['get_tensorboard'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/GetTensorboard', - request_serializer=tensorboard_service.GetTensorboardRequest.serialize, - response_deserializer=tensorboard.Tensorboard.deserialize, - ) - return self._stubs['get_tensorboard'] - - @property - def update_tensorboard(self) -> Callable[ - [tensorboard_service.UpdateTensorboardRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the update tensorboard method over gRPC. - - Updates a Tensorboard. - - Returns: - Callable[[~.UpdateTensorboardRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_tensorboard' not in self._stubs: - self._stubs['update_tensorboard'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/UpdateTensorboard', - request_serializer=tensorboard_service.UpdateTensorboardRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_tensorboard'] - - @property - def list_tensorboards(self) -> Callable[ - [tensorboard_service.ListTensorboardsRequest], - Awaitable[tensorboard_service.ListTensorboardsResponse]]: - r"""Return a callable for the list tensorboards method over gRPC. - - Lists Tensorboards in a Location. - - Returns: - Callable[[~.ListTensorboardsRequest], - Awaitable[~.ListTensorboardsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_tensorboards' not in self._stubs: - self._stubs['list_tensorboards'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/ListTensorboards', - request_serializer=tensorboard_service.ListTensorboardsRequest.serialize, - response_deserializer=tensorboard_service.ListTensorboardsResponse.deserialize, - ) - return self._stubs['list_tensorboards'] - - @property - def delete_tensorboard(self) -> Callable[ - [tensorboard_service.DeleteTensorboardRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete tensorboard method over gRPC. - - Deletes a Tensorboard. - - Returns: - Callable[[~.DeleteTensorboardRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_tensorboard' not in self._stubs: - self._stubs['delete_tensorboard'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/DeleteTensorboard', - request_serializer=tensorboard_service.DeleteTensorboardRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_tensorboard'] - - @property - def create_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.CreateTensorboardExperimentRequest], - Awaitable[gca_tensorboard_experiment.TensorboardExperiment]]: - r"""Return a callable for the create tensorboard experiment method over gRPC. - - Creates a TensorboardExperiment. - - Returns: - Callable[[~.CreateTensorboardExperimentRequest], - Awaitable[~.TensorboardExperiment]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_tensorboard_experiment' not in self._stubs: - self._stubs['create_tensorboard_experiment'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/CreateTensorboardExperiment', - request_serializer=tensorboard_service.CreateTensorboardExperimentRequest.serialize, - response_deserializer=gca_tensorboard_experiment.TensorboardExperiment.deserialize, - ) - return self._stubs['create_tensorboard_experiment'] - - @property - def get_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.GetTensorboardExperimentRequest], - Awaitable[tensorboard_experiment.TensorboardExperiment]]: - r"""Return a callable for the get tensorboard experiment method over gRPC. - - Gets a TensorboardExperiment. - - Returns: - Callable[[~.GetTensorboardExperimentRequest], - Awaitable[~.TensorboardExperiment]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_tensorboard_experiment' not in self._stubs: - self._stubs['get_tensorboard_experiment'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/GetTensorboardExperiment', - request_serializer=tensorboard_service.GetTensorboardExperimentRequest.serialize, - response_deserializer=tensorboard_experiment.TensorboardExperiment.deserialize, - ) - return self._stubs['get_tensorboard_experiment'] - - @property - def update_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.UpdateTensorboardExperimentRequest], - Awaitable[gca_tensorboard_experiment.TensorboardExperiment]]: - r"""Return a callable for the update tensorboard experiment method over gRPC. - - Updates a TensorboardExperiment. - - Returns: - Callable[[~.UpdateTensorboardExperimentRequest], - Awaitable[~.TensorboardExperiment]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_tensorboard_experiment' not in self._stubs: - self._stubs['update_tensorboard_experiment'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/UpdateTensorboardExperiment', - request_serializer=tensorboard_service.UpdateTensorboardExperimentRequest.serialize, - response_deserializer=gca_tensorboard_experiment.TensorboardExperiment.deserialize, - ) - return self._stubs['update_tensorboard_experiment'] - - @property - def list_tensorboard_experiments(self) -> Callable[ - [tensorboard_service.ListTensorboardExperimentsRequest], - Awaitable[tensorboard_service.ListTensorboardExperimentsResponse]]: - r"""Return a callable for the list tensorboard experiments method over gRPC. - - Lists TensorboardExperiments in a Location. - - Returns: - Callable[[~.ListTensorboardExperimentsRequest], - Awaitable[~.ListTensorboardExperimentsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_tensorboard_experiments' not in self._stubs: - self._stubs['list_tensorboard_experiments'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/ListTensorboardExperiments', - request_serializer=tensorboard_service.ListTensorboardExperimentsRequest.serialize, - response_deserializer=tensorboard_service.ListTensorboardExperimentsResponse.deserialize, - ) - return self._stubs['list_tensorboard_experiments'] - - @property - def delete_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.DeleteTensorboardExperimentRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete tensorboard experiment method over gRPC. - - Deletes a TensorboardExperiment. 
- - Returns: - Callable[[~.DeleteTensorboardExperimentRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_tensorboard_experiment' not in self._stubs: - self._stubs['delete_tensorboard_experiment'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/DeleteTensorboardExperiment', - request_serializer=tensorboard_service.DeleteTensorboardExperimentRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_tensorboard_experiment'] - - @property - def create_tensorboard_run(self) -> Callable[ - [tensorboard_service.CreateTensorboardRunRequest], - Awaitable[gca_tensorboard_run.TensorboardRun]]: - r"""Return a callable for the create tensorboard run method over gRPC. - - Creates a TensorboardRun. - - Returns: - Callable[[~.CreateTensorboardRunRequest], - Awaitable[~.TensorboardRun]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_tensorboard_run' not in self._stubs: - self._stubs['create_tensorboard_run'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/CreateTensorboardRun', - request_serializer=tensorboard_service.CreateTensorboardRunRequest.serialize, - response_deserializer=gca_tensorboard_run.TensorboardRun.deserialize, - ) - return self._stubs['create_tensorboard_run'] - - @property - def batch_create_tensorboard_runs(self) -> Callable[ - [tensorboard_service.BatchCreateTensorboardRunsRequest], - Awaitable[tensorboard_service.BatchCreateTensorboardRunsResponse]]: - r"""Return a callable for the batch create tensorboard runs method over gRPC. - - Batch create TensorboardRuns. - - Returns: - Callable[[~.BatchCreateTensorboardRunsRequest], - Awaitable[~.BatchCreateTensorboardRunsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_create_tensorboard_runs' not in self._stubs: - self._stubs['batch_create_tensorboard_runs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/BatchCreateTensorboardRuns', - request_serializer=tensorboard_service.BatchCreateTensorboardRunsRequest.serialize, - response_deserializer=tensorboard_service.BatchCreateTensorboardRunsResponse.deserialize, - ) - return self._stubs['batch_create_tensorboard_runs'] - - @property - def get_tensorboard_run(self) -> Callable[ - [tensorboard_service.GetTensorboardRunRequest], - Awaitable[tensorboard_run.TensorboardRun]]: - r"""Return a callable for the get tensorboard run method over gRPC. - - Gets a TensorboardRun. - - Returns: - Callable[[~.GetTensorboardRunRequest], - Awaitable[~.TensorboardRun]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_tensorboard_run' not in self._stubs: - self._stubs['get_tensorboard_run'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/GetTensorboardRun', - request_serializer=tensorboard_service.GetTensorboardRunRequest.serialize, - response_deserializer=tensorboard_run.TensorboardRun.deserialize, - ) - return self._stubs['get_tensorboard_run'] - - @property - def update_tensorboard_run(self) -> Callable[ - [tensorboard_service.UpdateTensorboardRunRequest], - Awaitable[gca_tensorboard_run.TensorboardRun]]: - r"""Return a callable for the update tensorboard run method over gRPC. - - Updates a TensorboardRun. - - Returns: - Callable[[~.UpdateTensorboardRunRequest], - Awaitable[~.TensorboardRun]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_tensorboard_run' not in self._stubs: - self._stubs['update_tensorboard_run'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/UpdateTensorboardRun', - request_serializer=tensorboard_service.UpdateTensorboardRunRequest.serialize, - response_deserializer=gca_tensorboard_run.TensorboardRun.deserialize, - ) - return self._stubs['update_tensorboard_run'] - - @property - def list_tensorboard_runs(self) -> Callable[ - [tensorboard_service.ListTensorboardRunsRequest], - Awaitable[tensorboard_service.ListTensorboardRunsResponse]]: - r"""Return a callable for the list tensorboard runs method over gRPC. - - Lists TensorboardRuns in a Location. 
- - Returns: - Callable[[~.ListTensorboardRunsRequest], - Awaitable[~.ListTensorboardRunsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_tensorboard_runs' not in self._stubs: - self._stubs['list_tensorboard_runs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/ListTensorboardRuns', - request_serializer=tensorboard_service.ListTensorboardRunsRequest.serialize, - response_deserializer=tensorboard_service.ListTensorboardRunsResponse.deserialize, - ) - return self._stubs['list_tensorboard_runs'] - - @property - def delete_tensorboard_run(self) -> Callable[ - [tensorboard_service.DeleteTensorboardRunRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete tensorboard run method over gRPC. - - Deletes a TensorboardRun. - - Returns: - Callable[[~.DeleteTensorboardRunRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_tensorboard_run' not in self._stubs: - self._stubs['delete_tensorboard_run'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/DeleteTensorboardRun', - request_serializer=tensorboard_service.DeleteTensorboardRunRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_tensorboard_run'] - - @property - def batch_create_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.BatchCreateTensorboardTimeSeriesRequest], - Awaitable[tensorboard_service.BatchCreateTensorboardTimeSeriesResponse]]: - r"""Return a callable for the batch create tensorboard time - series method over gRPC. - - Batch create TensorboardTimeSeries that belong to a - TensorboardExperiment. - - Returns: - Callable[[~.BatchCreateTensorboardTimeSeriesRequest], - Awaitable[~.BatchCreateTensorboardTimeSeriesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_create_tensorboard_time_series' not in self._stubs: - self._stubs['batch_create_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/BatchCreateTensorboardTimeSeries', - request_serializer=tensorboard_service.BatchCreateTensorboardTimeSeriesRequest.serialize, - response_deserializer=tensorboard_service.BatchCreateTensorboardTimeSeriesResponse.deserialize, - ) - return self._stubs['batch_create_tensorboard_time_series'] - - @property - def create_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.CreateTensorboardTimeSeriesRequest], - Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries]]: - r"""Return a callable for the create tensorboard time series method over gRPC. - - Creates a TensorboardTimeSeries. 
- - Returns: - Callable[[~.CreateTensorboardTimeSeriesRequest], - Awaitable[~.TensorboardTimeSeries]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_tensorboard_time_series' not in self._stubs: - self._stubs['create_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/CreateTensorboardTimeSeries', - request_serializer=tensorboard_service.CreateTensorboardTimeSeriesRequest.serialize, - response_deserializer=gca_tensorboard_time_series.TensorboardTimeSeries.deserialize, - ) - return self._stubs['create_tensorboard_time_series'] - - @property - def get_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.GetTensorboardTimeSeriesRequest], - Awaitable[tensorboard_time_series.TensorboardTimeSeries]]: - r"""Return a callable for the get tensorboard time series method over gRPC. - - Gets a TensorboardTimeSeries. - - Returns: - Callable[[~.GetTensorboardTimeSeriesRequest], - Awaitable[~.TensorboardTimeSeries]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_tensorboard_time_series' not in self._stubs: - self._stubs['get_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/GetTensorboardTimeSeries', - request_serializer=tensorboard_service.GetTensorboardTimeSeriesRequest.serialize, - response_deserializer=tensorboard_time_series.TensorboardTimeSeries.deserialize, - ) - return self._stubs['get_tensorboard_time_series'] - - @property - def update_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.UpdateTensorboardTimeSeriesRequest], - Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries]]: - r"""Return a callable for the update tensorboard time series method over gRPC. - - Updates a TensorboardTimeSeries. - - Returns: - Callable[[~.UpdateTensorboardTimeSeriesRequest], - Awaitable[~.TensorboardTimeSeries]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_tensorboard_time_series' not in self._stubs: - self._stubs['update_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/UpdateTensorboardTimeSeries', - request_serializer=tensorboard_service.UpdateTensorboardTimeSeriesRequest.serialize, - response_deserializer=gca_tensorboard_time_series.TensorboardTimeSeries.deserialize, - ) - return self._stubs['update_tensorboard_time_series'] - - @property - def list_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.ListTensorboardTimeSeriesRequest], - Awaitable[tensorboard_service.ListTensorboardTimeSeriesResponse]]: - r"""Return a callable for the list tensorboard time series method over gRPC. - - Lists TensorboardTimeSeries in a Location. 
- - Returns: - Callable[[~.ListTensorboardTimeSeriesRequest], - Awaitable[~.ListTensorboardTimeSeriesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_tensorboard_time_series' not in self._stubs: - self._stubs['list_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/ListTensorboardTimeSeries', - request_serializer=tensorboard_service.ListTensorboardTimeSeriesRequest.serialize, - response_deserializer=tensorboard_service.ListTensorboardTimeSeriesResponse.deserialize, - ) - return self._stubs['list_tensorboard_time_series'] - - @property - def delete_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.DeleteTensorboardTimeSeriesRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete tensorboard time series method over gRPC. - - Deletes a TensorboardTimeSeries. - - Returns: - Callable[[~.DeleteTensorboardTimeSeriesRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_tensorboard_time_series' not in self._stubs: - self._stubs['delete_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/DeleteTensorboardTimeSeries', - request_serializer=tensorboard_service.DeleteTensorboardTimeSeriesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_tensorboard_time_series'] - - @property - def batch_read_tensorboard_time_series_data(self) -> Callable[ - [tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest], - Awaitable[tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse]]: - r"""Return a callable for the batch read tensorboard time - series data method over gRPC. - - Reads multiple TensorboardTimeSeries' data. The data - point number limit is 1000 for scalars, 100 for tensors - and blob references. If the number of data points stored - is less than the limit, all data will be returned. - Otherwise, that limit number of data points will be - randomly selected from this time series and returned. - - Returns: - Callable[[~.BatchReadTensorboardTimeSeriesDataRequest], - Awaitable[~.BatchReadTensorboardTimeSeriesDataResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'batch_read_tensorboard_time_series_data' not in self._stubs: - self._stubs['batch_read_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/BatchReadTensorboardTimeSeriesData', - request_serializer=tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest.serialize, - response_deserializer=tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse.deserialize, - ) - return self._stubs['batch_read_tensorboard_time_series_data'] - - @property - def read_tensorboard_time_series_data(self) -> Callable[ - [tensorboard_service.ReadTensorboardTimeSeriesDataRequest], - Awaitable[tensorboard_service.ReadTensorboardTimeSeriesDataResponse]]: - r"""Return a callable for the read tensorboard time series - data method over gRPC. - - Reads a TensorboardTimeSeries' data. By default, if the number - of data points stored is less than 1000, all data will be - returned. Otherwise, 1000 data points will be randomly selected - from this time series and returned. This value can be changed by - changing max_data_points, which can't be greater than 10k. - - Returns: - Callable[[~.ReadTensorboardTimeSeriesDataRequest], - Awaitable[~.ReadTensorboardTimeSeriesDataResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'read_tensorboard_time_series_data' not in self._stubs: - self._stubs['read_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/ReadTensorboardTimeSeriesData', - request_serializer=tensorboard_service.ReadTensorboardTimeSeriesDataRequest.serialize, - response_deserializer=tensorboard_service.ReadTensorboardTimeSeriesDataResponse.deserialize, - ) - return self._stubs['read_tensorboard_time_series_data'] - - @property - def read_tensorboard_blob_data(self) -> Callable[ - [tensorboard_service.ReadTensorboardBlobDataRequest], - Awaitable[tensorboard_service.ReadTensorboardBlobDataResponse]]: - r"""Return a callable for the read tensorboard blob data method over gRPC. - - Gets bytes of TensorboardBlobs. - This is to allow reading blob data stored in consumer - project's Cloud Storage bucket without users having to - obtain Cloud Storage access permission. - - Returns: - Callable[[~.ReadTensorboardBlobDataRequest], - Awaitable[~.ReadTensorboardBlobDataResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'read_tensorboard_blob_data' not in self._stubs: - self._stubs['read_tensorboard_blob_data'] = self.grpc_channel.unary_stream( - '/google.cloud.aiplatform.v1.TensorboardService/ReadTensorboardBlobData', - request_serializer=tensorboard_service.ReadTensorboardBlobDataRequest.serialize, - response_deserializer=tensorboard_service.ReadTensorboardBlobDataResponse.deserialize, - ) - return self._stubs['read_tensorboard_blob_data'] - - @property - def write_tensorboard_experiment_data(self) -> Callable[ - [tensorboard_service.WriteTensorboardExperimentDataRequest], - Awaitable[tensorboard_service.WriteTensorboardExperimentDataResponse]]: - r"""Return a callable for the write tensorboard experiment - data method over gRPC. - - Write time series data points of multiple - TensorboardTimeSeries in multiple TensorboardRun's. If - any data fail to be ingested, an error will be returned. - - Returns: - Callable[[~.WriteTensorboardExperimentDataRequest], - Awaitable[~.WriteTensorboardExperimentDataResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'write_tensorboard_experiment_data' not in self._stubs: - self._stubs['write_tensorboard_experiment_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/WriteTensorboardExperimentData', - request_serializer=tensorboard_service.WriteTensorboardExperimentDataRequest.serialize, - response_deserializer=tensorboard_service.WriteTensorboardExperimentDataResponse.deserialize, - ) - return self._stubs['write_tensorboard_experiment_data'] - - @property - def write_tensorboard_run_data(self) -> Callable[ - [tensorboard_service.WriteTensorboardRunDataRequest], - Awaitable[tensorboard_service.WriteTensorboardRunDataResponse]]: - r"""Return a callable for the write tensorboard run data method over gRPC. - - Write time series data points into multiple - TensorboardTimeSeries under a TensorboardRun. If any - data fail to be ingested, an error will be returned. - - Returns: - Callable[[~.WriteTensorboardRunDataRequest], - Awaitable[~.WriteTensorboardRunDataResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'write_tensorboard_run_data' not in self._stubs: - self._stubs['write_tensorboard_run_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/WriteTensorboardRunData', - request_serializer=tensorboard_service.WriteTensorboardRunDataRequest.serialize, - response_deserializer=tensorboard_service.WriteTensorboardRunDataResponse.deserialize, - ) - return self._stubs['write_tensorboard_run_data'] - - @property - def export_tensorboard_time_series_data(self) -> Callable[ - [tensorboard_service.ExportTensorboardTimeSeriesDataRequest], - Awaitable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]]: - r"""Return a callable for the export tensorboard time series - data method over gRPC. - - Exports a TensorboardTimeSeries' data. Data is - returned in paginated responses. - - Returns: - Callable[[~.ExportTensorboardTimeSeriesDataRequest], - Awaitable[~.ExportTensorboardTimeSeriesDataResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'export_tensorboard_time_series_data' not in self._stubs: - self._stubs['export_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.TensorboardService/ExportTensorboardTimeSeriesData', - request_serializer=tensorboard_service.ExportTensorboardTimeSeriesDataRequest.serialize, - response_deserializer=tensorboard_service.ExportTensorboardTimeSeriesDataResponse.deserialize, - ) - return self._stubs['export_tensorboard_time_series_data'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'TensorboardServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/__init__.py deleted file mode 100644 index d629499098..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import VizierServiceClient -from .async_client import VizierServiceAsyncClient - -__all__ = ( - 'VizierServiceClient', - 'VizierServiceAsyncClient', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/async_client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/async_client.py deleted file mode 100644 index 66d026ada6..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/async_client.py +++ /dev/null @@ -1,1292 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1.services.vizier_service import pagers -from google.cloud.aiplatform_v1.types import study -from google.cloud.aiplatform_v1.types import study as gca_study -from google.cloud.aiplatform_v1.types import vizier_service -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import VizierServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import VizierServiceGrpcAsyncIOTransport -from .client import VizierServiceClient - - -class VizierServiceAsyncClient: - """Vertex AI Vizier API. - Vertex AI Vizier is a service to solve blackbox optimization - problems, such as tuning machine learning hyperparameters and - searching over deep learning architectures. 
- """ - - _client: VizierServiceClient - - DEFAULT_ENDPOINT = VizierServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = VizierServiceClient.DEFAULT_MTLS_ENDPOINT - - custom_job_path = staticmethod(VizierServiceClient.custom_job_path) - parse_custom_job_path = staticmethod(VizierServiceClient.parse_custom_job_path) - study_path = staticmethod(VizierServiceClient.study_path) - parse_study_path = staticmethod(VizierServiceClient.parse_study_path) - trial_path = staticmethod(VizierServiceClient.trial_path) - parse_trial_path = staticmethod(VizierServiceClient.parse_trial_path) - common_billing_account_path = staticmethod(VizierServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(VizierServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(VizierServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(VizierServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(VizierServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(VizierServiceClient.parse_common_organization_path) - common_project_path = staticmethod(VizierServiceClient.common_project_path) - parse_common_project_path = staticmethod(VizierServiceClient.parse_common_project_path) - common_location_path = staticmethod(VizierServiceClient.common_location_path) - parse_common_location_path = staticmethod(VizierServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - VizierServiceAsyncClient: The constructed client. 
- """ - return VizierServiceClient.from_service_account_info.__func__(VizierServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - VizierServiceAsyncClient: The constructed client. - """ - return VizierServiceClient.from_service_account_file.__func__(VizierServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> VizierServiceTransport: - """Returns the transport used by the client instance. - - Returns: - VizierServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(VizierServiceClient).get_transport_class, type(VizierServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, VizierServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the vizier service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.VizierServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. 
- (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = VizierServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_study(self, - request: Union[vizier_service.CreateStudyRequest, dict] = None, - *, - parent: str = None, - study: gca_study.Study = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_study.Study: - r"""Creates a Study. A resource name will be generated - after creation of the Study. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateStudyRequest, dict]): - The request object. Request message for - [VizierService.CreateStudy][google.cloud.aiplatform.v1.VizierService.CreateStudy]. - parent (:class:`str`): - Required. The resource name of the Location to create - the CustomJob in. 
Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - study (:class:`google.cloud.aiplatform_v1.types.Study`): - Required. The Study configuration - used to create the Study. - - This corresponds to the ``study`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Study: - LINT.IfChange - A message representing a Study. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, study]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.CreateStudyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if study is not None: - request.study = study - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_study, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_study(self, - request: Union[vizier_service.GetStudyRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Study: - r"""Gets a Study by name. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetStudyRequest, dict]): - The request object. Request message for - [VizierService.GetStudy][google.cloud.aiplatform.v1.VizierService.GetStudy]. - name (:class:`str`): - Required. The name of the Study resource. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Study: - LINT.IfChange - A message representing a Study. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.GetStudyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_study, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_studies(self, - request: Union[vizier_service.ListStudiesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListStudiesAsyncPager: - r"""Lists all the studies in a region for an associated - project. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListStudiesRequest, dict]): - The request object. Request message for - [VizierService.ListStudies][google.cloud.aiplatform.v1.VizierService.ListStudies]. - parent (:class:`str`): - Required. The resource name of the Location to list the - Study from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.vizier_service.pagers.ListStudiesAsyncPager: - Response message for - [VizierService.ListStudies][google.cloud.aiplatform.v1.VizierService.ListStudies]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.ListStudiesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_studies, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListStudiesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_study(self, - request: Union[vizier_service.DeleteStudyRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes a Study. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteStudyRequest, dict]): - The request object. Request message for - [VizierService.DeleteStudy][google.cloud.aiplatform.v1.VizierService.DeleteStudy]. - name (:class:`str`): - Required. The name of the Study resource to be deleted. 
- Format: - ``projects/{project}/locations/{location}/studies/{study}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.DeleteStudyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_study, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def lookup_study(self, - request: Union[vizier_service.LookupStudyRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Study: - r"""Looks a study up using the user-defined display_name field - instead of the fully qualified resource name. 
- - Args: - request (Union[google.cloud.aiplatform_v1.types.LookupStudyRequest, dict]): - The request object. Request message for - [VizierService.LookupStudy][google.cloud.aiplatform.v1.VizierService.LookupStudy]. - parent (:class:`str`): - Required. The resource name of the Location to get the - Study from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Study: - LINT.IfChange - A message representing a Study. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.LookupStudyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.lookup_study, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def suggest_trials(self, - request: Union[vizier_service.SuggestTrialsRequest, dict] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Adds one or more Trials to a Study, with parameter values - suggested by Vertex AI Vizier. Returns a long-running operation - associated with the generation of Trial suggestions. When this - long-running operation succeeds, it will contain a - [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. - - Args: - request (Union[google.cloud.aiplatform_v1.types.SuggestTrialsRequest, dict]): - The request object. Request message for - [VizierService.SuggestTrials][google.cloud.aiplatform.v1.VizierService.SuggestTrials]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.SuggestTrialsResponse` - Response message for - [VizierService.SuggestTrials][google.cloud.aiplatform.v1.VizierService.SuggestTrials]. - - """ - # Create or coerce a protobuf request object. - request = vizier_service.SuggestTrialsRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.suggest_trials, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - vizier_service.SuggestTrialsResponse, - metadata_type=vizier_service.SuggestTrialsMetadata, - ) - - # Done; return the response. - return response - - async def create_trial(self, - request: Union[vizier_service.CreateTrialRequest, dict] = None, - *, - parent: str = None, - trial: study.Trial = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Trial: - r"""Adds a user provided Trial to a Study. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateTrialRequest, dict]): - The request object. Request message for - [VizierService.CreateTrial][google.cloud.aiplatform.v1.VizierService.CreateTrial]. - parent (:class:`str`): - Required. The resource name of the Study to create the - Trial in. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - trial (:class:`google.cloud.aiplatform_v1.types.Trial`): - Required. The Trial to create. - This corresponds to the ``trial`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Trial: - A message representing a Trial. 
A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, trial]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.CreateTrialRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if trial is not None: - request.trial = trial - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_trial, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_trial(self, - request: Union[vizier_service.GetTrialRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Trial: - r"""Gets a Trial. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetTrialRequest, dict]): - The request object. Request message for - [VizierService.GetTrial][google.cloud.aiplatform.v1.VizierService.GetTrial]. - name (:class:`str`): - Required. The name of the Trial resource. 
Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Trial: - A message representing a Trial. A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.GetTrialRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_trial, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def list_trials(self, - request: Union[vizier_service.ListTrialsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTrialsAsyncPager: - r"""Lists the Trials associated with a Study. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListTrialsRequest, dict]): - The request object. Request message for - [VizierService.ListTrials][google.cloud.aiplatform.v1.VizierService.ListTrials]. - parent (:class:`str`): - Required. The resource name of the Study to list the - Trial from. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.vizier_service.pagers.ListTrialsAsyncPager: - Response message for - [VizierService.ListTrials][google.cloud.aiplatform.v1.VizierService.ListTrials]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.ListTrialsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_trials, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListTrialsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def add_trial_measurement(self, - request: Union[vizier_service.AddTrialMeasurementRequest, dict] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Trial: - r"""Adds a measurement of the objective metrics to a - Trial. This measurement is assumed to have been taken - before the Trial is complete. - - Args: - request (Union[google.cloud.aiplatform_v1.types.AddTrialMeasurementRequest, dict]): - The request object. Request message for - [VizierService.AddTrialMeasurement][google.cloud.aiplatform.v1.VizierService.AddTrialMeasurement]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Trial: - A message representing a Trial. 
A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. - request = vizier_service.AddTrialMeasurementRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.add_trial_measurement, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("trial_name", request.trial_name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def complete_trial(self, - request: Union[vizier_service.CompleteTrialRequest, dict] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Trial: - r"""Marks a Trial as complete. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CompleteTrialRequest, dict]): - The request object. Request message for - [VizierService.CompleteTrial][google.cloud.aiplatform.v1.VizierService.CompleteTrial]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Trial: - A message representing a Trial. A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. 
- request = vizier_service.CompleteTrialRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.complete_trial, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_trial(self, - request: Union[vizier_service.DeleteTrialRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes a Trial. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteTrialRequest, dict]): - The request object. Request message for - [VizierService.DeleteTrial][google.cloud.aiplatform.v1.VizierService.DeleteTrial]. - name (:class:`str`): - Required. The Trial's name. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.DeleteTrialRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_trial, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def check_trial_early_stopping_state(self, - request: Union[vizier_service.CheckTrialEarlyStoppingStateRequest, dict] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Checks whether a Trial should stop or not. Returns a - long-running operation. When the operation is successful, it - will contain a - [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CheckTrialEarlyStoppingStateRequest, dict]): - The request object. Request message for - [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1.VizierService.CheckTrialEarlyStoppingState]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.CheckTrialEarlyStoppingStateResponse` - Response message for - [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1.VizierService.CheckTrialEarlyStoppingState]. - - """ - # Create or coerce a protobuf request object. - request = vizier_service.CheckTrialEarlyStoppingStateRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.check_trial_early_stopping_state, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("trial_name", request.trial_name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - vizier_service.CheckTrialEarlyStoppingStateResponse, - metadata_type=vizier_service.CheckTrialEarlyStoppingStateMetatdata, - ) - - # Done; return the response. - return response - - async def stop_trial(self, - request: Union[vizier_service.StopTrialRequest, dict] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Trial: - r"""Stops a Trial. - - Args: - request (Union[google.cloud.aiplatform_v1.types.StopTrialRequest, dict]): - The request object. 
Request message for - [VizierService.StopTrial][google.cloud.aiplatform.v1.VizierService.StopTrial]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Trial: - A message representing a Trial. A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. - request = vizier_service.StopTrialRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.stop_trial, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_optimal_trials(self, - request: Union[vizier_service.ListOptimalTrialsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> vizier_service.ListOptimalTrialsResponse: - r"""Lists the pareto-optimal Trials for multi-objective Study or the - optimal Trials for single-objective Study. The definition of - pareto-optimal can be checked in wiki page. - https://en.wikipedia.org/wiki/Pareto_efficiency - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListOptimalTrialsRequest, dict]): - The request object. 
Request message for - [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1.VizierService.ListOptimalTrials]. - parent (:class:`str`): - Required. The name of the Study that - the optimal Trial belongs to. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.ListOptimalTrialsResponse: - Response message for - [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1.VizierService.ListOptimalTrials]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.ListOptimalTrialsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_optimal_trials, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "VizierServiceAsyncClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/client.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/client.py deleted file mode 100644 index c276d57972..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/client.py +++ /dev/null @@ -1,1513 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
class VizierServiceClientMeta(type):
    """Metaclass for the VizierService client.

    Provides class-level helpers for building and retrieving support
    objects (e.g. transport) without polluting client instances.
    """
    _transport_registry = OrderedDict()  # type: Dict[str, Type[VizierServiceTransport]]
    _transport_registry["grpc"] = VizierServiceGrpcTransport
    _transport_registry["grpc_asyncio"] = VizierServiceGrpcAsyncIOTransport

    def get_transport_class(cls,
            label: str = None,
            ) -> Type[VizierServiceTransport]:
        """Returns an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # Guard clause: no explicit choice falls back to the first
        # registered transport (insertion order => "grpc").
        if not label:
            return next(iter(cls._transport_registry.values()))
        # An unknown label raises KeyError from the registry lookup,
        # matching the original behavior.
        return cls._transport_registry[label]
- ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - VizierServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - VizierServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> VizierServiceTransport: - """Returns the transport used by the client instance. - - Returns: - VizierServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def custom_job_path(project: str,location: str,custom_job: str,) -> str: - """Returns a fully-qualified custom_job string.""" - return "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) - - @staticmethod - def parse_custom_job_path(path: str) -> Dict[str,str]: - """Parses a custom_job path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/customJobs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def study_path(project: str,location: str,study: str,) -> str: - """Returns a fully-qualified study string.""" - return "projects/{project}/locations/{location}/studies/{study}".format(project=project, location=location, study=study, ) - - @staticmethod - def parse_study_path(path: str) -> Dict[str,str]: - """Parses a study path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/studies/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def trial_path(project: str,location: str,study: str,trial: str,) -> str: - """Returns a fully-qualified trial string.""" - return "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, ) - - @staticmethod - def parse_trial_path(path: str) -> Dict[str,str]: - """Parses a trial path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/studies/(?P.+?)/trials/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component 
segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, VizierServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - 
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the vizier service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, VizierServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. 
- """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, VizierServiceTransport): - # transport is a VizierServiceTransport instance. 
- if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def create_study(self, - request: Union[vizier_service.CreateStudyRequest, dict] = None, - *, - parent: str = None, - study: gca_study.Study = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_study.Study: - r"""Creates a Study. A resource name will be generated - after creation of the Study. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateStudyRequest, dict]): - The request object. Request message for - [VizierService.CreateStudy][google.cloud.aiplatform.v1.VizierService.CreateStudy]. - parent (str): - Required. The resource name of the Location to create - the CustomJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - study (google.cloud.aiplatform_v1.types.Study): - Required. The Study configuration - used to create the Study. - - This corresponds to the ``study`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Study: - LINT.IfChange - A message representing a Study. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, study]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.CreateStudyRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.CreateStudyRequest): - request = vizier_service.CreateStudyRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if study is not None: - request.study = study - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_study] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_study(self, - request: Union[vizier_service.GetStudyRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Study: - r"""Gets a Study by name. 
- - Args: - request (Union[google.cloud.aiplatform_v1.types.GetStudyRequest, dict]): - The request object. Request message for - [VizierService.GetStudy][google.cloud.aiplatform.v1.VizierService.GetStudy]. - name (str): - Required. The name of the Study resource. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Study: - LINT.IfChange - A message representing a Study. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.GetStudyRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.GetStudyRequest): - request = vizier_service.GetStudyRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_study] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_studies(self, - request: Union[vizier_service.ListStudiesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListStudiesPager: - r"""Lists all the studies in a region for an associated - project. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListStudiesRequest, dict]): - The request object. Request message for - [VizierService.ListStudies][google.cloud.aiplatform.v1.VizierService.ListStudies]. - parent (str): - Required. The resource name of the Location to list the - Study from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.vizier_service.pagers.ListStudiesPager: - Response message for - [VizierService.ListStudies][google.cloud.aiplatform.v1.VizierService.ListStudies]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.ListStudiesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.ListStudiesRequest): - request = vizier_service.ListStudiesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_studies] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListStudiesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_study(self, - request: Union[vizier_service.DeleteStudyRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes a Study. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteStudyRequest, dict]): - The request object. Request message for - [VizierService.DeleteStudy][google.cloud.aiplatform.v1.VizierService.DeleteStudy]. - name (str): - Required. 
The name of the Study resource to be deleted. - Format: - ``projects/{project}/locations/{location}/studies/{study}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.DeleteStudyRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.DeleteStudyRequest): - request = vizier_service.DeleteStudyRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_study] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def lookup_study(self, - request: Union[vizier_service.LookupStudyRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Study: - r"""Looks a study up using the user-defined display_name field - instead of the fully qualified resource name. - - Args: - request (Union[google.cloud.aiplatform_v1.types.LookupStudyRequest, dict]): - The request object. Request message for - [VizierService.LookupStudy][google.cloud.aiplatform.v1.VizierService.LookupStudy]. - parent (str): - Required. The resource name of the Location to get the - Study from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Study: - LINT.IfChange - A message representing a Study. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.LookupStudyRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, vizier_service.LookupStudyRequest): - request = vizier_service.LookupStudyRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.lookup_study] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def suggest_trials(self, - request: Union[vizier_service.SuggestTrialsRequest, dict] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Adds one or more Trials to a Study, with parameter values - suggested by Vertex AI Vizier. Returns a long-running operation - associated with the generation of Trial suggestions. When this - long-running operation succeeds, it will contain a - [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. - - Args: - request (Union[google.cloud.aiplatform_v1.types.SuggestTrialsRequest, dict]): - The request object. Request message for - [VizierService.SuggestTrials][google.cloud.aiplatform.v1.VizierService.SuggestTrials]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
- - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.SuggestTrialsResponse` - Response message for - [VizierService.SuggestTrials][google.cloud.aiplatform.v1.VizierService.SuggestTrials]. - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.SuggestTrialsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.SuggestTrialsRequest): - request = vizier_service.SuggestTrialsRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.suggest_trials] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - vizier_service.SuggestTrialsResponse, - metadata_type=vizier_service.SuggestTrialsMetadata, - ) - - # Done; return the response. - return response - - def create_trial(self, - request: Union[vizier_service.CreateTrialRequest, dict] = None, - *, - parent: str = None, - trial: study.Trial = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Trial: - r"""Adds a user provided Trial to a Study. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CreateTrialRequest, dict]): - The request object. Request message for - [VizierService.CreateTrial][google.cloud.aiplatform.v1.VizierService.CreateTrial]. - parent (str): - Required. 
The resource name of the Study to create the - Trial in. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - trial (google.cloud.aiplatform_v1.types.Trial): - Required. The Trial to create. - This corresponds to the ``trial`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Trial: - A message representing a Trial. A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, trial]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.CreateTrialRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.CreateTrialRequest): - request = vizier_service.CreateTrialRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if trial is not None: - request.trial = trial - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.create_trial] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_trial(self, - request: Union[vizier_service.GetTrialRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Trial: - r"""Gets a Trial. - - Args: - request (Union[google.cloud.aiplatform_v1.types.GetTrialRequest, dict]): - The request object. Request message for - [VizierService.GetTrial][google.cloud.aiplatform.v1.VizierService.GetTrial]. - name (str): - Required. The name of the Trial resource. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Trial: - A message representing a Trial. A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.GetTrialRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.GetTrialRequest): - request = vizier_service.GetTrialRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_trial] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_trials(self, - request: Union[vizier_service.ListTrialsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTrialsPager: - r"""Lists the Trials associated with a Study. - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListTrialsRequest, dict]): - The request object. Request message for - [VizierService.ListTrials][google.cloud.aiplatform.v1.VizierService.ListTrials]. - parent (str): - Required. The resource name of the Study to list the - Trial from. 
Format: - ``projects/{project}/locations/{location}/studies/{study}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.services.vizier_service.pagers.ListTrialsPager: - Response message for - [VizierService.ListTrials][google.cloud.aiplatform.v1.VizierService.ListTrials]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.ListTrialsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.ListTrialsRequest): - request = vizier_service.ListTrialsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_trials] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListTrialsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def add_trial_measurement(self, - request: Union[vizier_service.AddTrialMeasurementRequest, dict] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Trial: - r"""Adds a measurement of the objective metrics to a - Trial. This measurement is assumed to have been taken - before the Trial is complete. - - Args: - request (Union[google.cloud.aiplatform_v1.types.AddTrialMeasurementRequest, dict]): - The request object. Request message for - [VizierService.AddTrialMeasurement][google.cloud.aiplatform.v1.VizierService.AddTrialMeasurement]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Trial: - A message representing a Trial. A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.AddTrialMeasurementRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, vizier_service.AddTrialMeasurementRequest): - request = vizier_service.AddTrialMeasurementRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.add_trial_measurement] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("trial_name", request.trial_name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def complete_trial(self, - request: Union[vizier_service.CompleteTrialRequest, dict] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Trial: - r"""Marks a Trial as complete. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CompleteTrialRequest, dict]): - The request object. Request message for - [VizierService.CompleteTrial][google.cloud.aiplatform.v1.VizierService.CompleteTrial]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Trial: - A message representing a Trial. A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.CompleteTrialRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, vizier_service.CompleteTrialRequest): - request = vizier_service.CompleteTrialRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.complete_trial] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_trial(self, - request: Union[vizier_service.DeleteTrialRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes a Trial. - - Args: - request (Union[google.cloud.aiplatform_v1.types.DeleteTrialRequest, dict]): - The request object. Request message for - [VizierService.DeleteTrial][google.cloud.aiplatform.v1.VizierService.DeleteTrial]. - name (str): - Required. The Trial's name. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.DeleteTrialRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.DeleteTrialRequest): - request = vizier_service.DeleteTrialRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_trial] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def check_trial_early_stopping_state(self, - request: Union[vizier_service.CheckTrialEarlyStoppingStateRequest, dict] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Checks whether a Trial should stop or not. Returns a - long-running operation. When the operation is successful, it - will contain a - [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. - - Args: - request (Union[google.cloud.aiplatform_v1.types.CheckTrialEarlyStoppingStateRequest, dict]): - The request object. Request message for - [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1.VizierService.CheckTrialEarlyStoppingState]. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1.types.CheckTrialEarlyStoppingStateResponse` - Response message for - [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1.VizierService.CheckTrialEarlyStoppingState]. - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.CheckTrialEarlyStoppingStateRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.CheckTrialEarlyStoppingStateRequest): - request = vizier_service.CheckTrialEarlyStoppingStateRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.check_trial_early_stopping_state] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("trial_name", request.trial_name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - vizier_service.CheckTrialEarlyStoppingStateResponse, - metadata_type=vizier_service.CheckTrialEarlyStoppingStateMetatdata, - ) - - # Done; return the response. 
- return response - - def stop_trial(self, - request: Union[vizier_service.StopTrialRequest, dict] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Trial: - r"""Stops a Trial. - - Args: - request (Union[google.cloud.aiplatform_v1.types.StopTrialRequest, dict]): - The request object. Request message for - [VizierService.StopTrial][google.cloud.aiplatform.v1.VizierService.StopTrial]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.Trial: - A message representing a Trial. A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.StopTrialRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.StopTrialRequest): - request = vizier_service.StopTrialRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.stop_trial] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def list_optimal_trials(self, - request: Union[vizier_service.ListOptimalTrialsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> vizier_service.ListOptimalTrialsResponse: - r"""Lists the pareto-optimal Trials for multi-objective Study or the - optimal Trials for single-objective Study. The definition of - pareto-optimal can be checked in wiki page. - https://en.wikipedia.org/wiki/Pareto_efficiency - - Args: - request (Union[google.cloud.aiplatform_v1.types.ListOptimalTrialsRequest, dict]): - The request object. Request message for - [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1.VizierService.ListOptimalTrials]. - parent (str): - Required. The name of the Study that - the optimal Trial belongs to. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1.types.ListOptimalTrialsResponse: - Response message for - [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1.VizierService.ListOptimalTrials]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.ListOptimalTrialsRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.ListOptimalTrialsRequest): - request = vizier_service.ListOptimalTrialsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_optimal_trials] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! 
- """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "VizierServiceClient", -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/pagers.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/pagers.py deleted file mode 100644 index 7b1b38e372..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/pagers.py +++ /dev/null @@ -1,263 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.aiplatform_v1.types import study -from google.cloud.aiplatform_v1.types import vizier_service - - -class ListStudiesPager: - """A pager for iterating through ``list_studies`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListStudiesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``studies`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListStudies`` requests and continue to iterate - through the ``studies`` field on the - corresponding responses. 
- - All the usual :class:`google.cloud.aiplatform_v1.types.ListStudiesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., vizier_service.ListStudiesResponse], - request: vizier_service.ListStudiesRequest, - response: vizier_service.ListStudiesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListStudiesRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListStudiesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = vizier_service.ListStudiesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[vizier_service.ListStudiesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[study.Study]: - for page in self.pages: - yield from page.studies - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListStudiesAsyncPager: - """A pager for iterating through ``list_studies`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListStudiesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``studies`` field. 
- - If there are more pages, the ``__aiter__`` method will make additional - ``ListStudies`` requests and continue to iterate - through the ``studies`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListStudiesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[vizier_service.ListStudiesResponse]], - request: vizier_service.ListStudiesRequest, - response: vizier_service.ListStudiesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListStudiesRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListStudiesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = vizier_service.ListStudiesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[vizier_service.ListStudiesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[study.Study]: - async def async_generator(): - async for page in self.pages: - for response in page.studies: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTrialsPager: - """A pager for iterating through ``list_trials`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListTrialsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``trials`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListTrials`` requests and continue to iterate - through the ``trials`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListTrialsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., vizier_service.ListTrialsResponse], - request: vizier_service.ListTrialsRequest, - response: vizier_service.ListTrialsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. 
- request (google.cloud.aiplatform_v1.types.ListTrialsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListTrialsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = vizier_service.ListTrialsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[vizier_service.ListTrialsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[study.Trial]: - for page in self.pages: - yield from page.trials - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTrialsAsyncPager: - """A pager for iterating through ``list_trials`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1.types.ListTrialsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``trials`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListTrials`` requests and continue to iterate - through the ``trials`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1.types.ListTrialsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., Awaitable[vizier_service.ListTrialsResponse]], - request: vizier_service.ListTrialsRequest, - response: vizier_service.ListTrialsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1.types.ListTrialsRequest): - The initial request object. - response (google.cloud.aiplatform_v1.types.ListTrialsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = vizier_service.ListTrialsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[vizier_service.ListTrialsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[study.Trial]: - async def async_generator(): - async for page in self.pages: - for response in page.trials: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/transports/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/transports/__init__.py deleted file mode 100644 index afc70ea68e..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, 
Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import VizierServiceTransport -from .grpc import VizierServiceGrpcTransport -from .grpc_asyncio import VizierServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[VizierServiceTransport]] -_transport_registry['grpc'] = VizierServiceGrpcTransport -_transport_registry['grpc_asyncio'] = VizierServiceGrpcAsyncIOTransport - -__all__ = ( - 'VizierServiceTransport', - 'VizierServiceGrpcTransport', - 'VizierServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/transports/base.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/transports/base.py deleted file mode 100644 index 21bf1fd8b8..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/transports/base.py +++ /dev/null @@ -1,352 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1.types import study -from google.cloud.aiplatform_v1.types import study as gca_study -from google.cloud.aiplatform_v1.types import vizier_service -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class VizierServiceTransport(abc.ABC): - """Abstract transport class for VizierService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. 
- if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.create_study: gapic_v1.method.wrap_method( - self.create_study, - default_timeout=None, - client_info=client_info, - ), - self.get_study: gapic_v1.method.wrap_method( - self.get_study, - default_timeout=None, - client_info=client_info, - ), - self.list_studies: gapic_v1.method.wrap_method( - self.list_studies, - default_timeout=None, - client_info=client_info, - ), - self.delete_study: gapic_v1.method.wrap_method( - self.delete_study, - default_timeout=None, - client_info=client_info, - ), - self.lookup_study: gapic_v1.method.wrap_method( - self.lookup_study, - default_timeout=None, - client_info=client_info, - ), - self.suggest_trials: gapic_v1.method.wrap_method( - self.suggest_trials, - default_timeout=None, - client_info=client_info, - ), - self.create_trial: gapic_v1.method.wrap_method( - self.create_trial, - default_timeout=None, - client_info=client_info, - ), - self.get_trial: gapic_v1.method.wrap_method( - self.get_trial, - default_timeout=None, - client_info=client_info, - ), - self.list_trials: gapic_v1.method.wrap_method( - self.list_trials, - default_timeout=None, - client_info=client_info, - ), - self.add_trial_measurement: gapic_v1.method.wrap_method( - self.add_trial_measurement, - default_timeout=None, - client_info=client_info, - ), - self.complete_trial: gapic_v1.method.wrap_method( - self.complete_trial, - default_timeout=None, - client_info=client_info, - ), - self.delete_trial: gapic_v1.method.wrap_method( - self.delete_trial, - default_timeout=None, - client_info=client_info, - ), - self.check_trial_early_stopping_state: gapic_v1.method.wrap_method( 
- self.check_trial_early_stopping_state, - default_timeout=None, - client_info=client_info, - ), - self.stop_trial: gapic_v1.method.wrap_method( - self.stop_trial, - default_timeout=None, - client_info=client_info, - ), - self.list_optimal_trials: gapic_v1.method.wrap_method( - self.list_optimal_trials, - default_timeout=None, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! - """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_study(self) -> Callable[ - [vizier_service.CreateStudyRequest], - Union[ - gca_study.Study, - Awaitable[gca_study.Study] - ]]: - raise NotImplementedError() - - @property - def get_study(self) -> Callable[ - [vizier_service.GetStudyRequest], - Union[ - study.Study, - Awaitable[study.Study] - ]]: - raise NotImplementedError() - - @property - def list_studies(self) -> Callable[ - [vizier_service.ListStudiesRequest], - Union[ - vizier_service.ListStudiesResponse, - Awaitable[vizier_service.ListStudiesResponse] - ]]: - raise NotImplementedError() - - @property - def delete_study(self) -> Callable[ - [vizier_service.DeleteStudyRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def lookup_study(self) -> Callable[ - [vizier_service.LookupStudyRequest], - Union[ - study.Study, - Awaitable[study.Study] - ]]: - raise NotImplementedError() - - @property - def suggest_trials(self) -> Callable[ - [vizier_service.SuggestTrialsRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def create_trial(self) -> Callable[ - [vizier_service.CreateTrialRequest], - Union[ - 
study.Trial, - Awaitable[study.Trial] - ]]: - raise NotImplementedError() - - @property - def get_trial(self) -> Callable[ - [vizier_service.GetTrialRequest], - Union[ - study.Trial, - Awaitable[study.Trial] - ]]: - raise NotImplementedError() - - @property - def list_trials(self) -> Callable[ - [vizier_service.ListTrialsRequest], - Union[ - vizier_service.ListTrialsResponse, - Awaitable[vizier_service.ListTrialsResponse] - ]]: - raise NotImplementedError() - - @property - def add_trial_measurement(self) -> Callable[ - [vizier_service.AddTrialMeasurementRequest], - Union[ - study.Trial, - Awaitable[study.Trial] - ]]: - raise NotImplementedError() - - @property - def complete_trial(self) -> Callable[ - [vizier_service.CompleteTrialRequest], - Union[ - study.Trial, - Awaitable[study.Trial] - ]]: - raise NotImplementedError() - - @property - def delete_trial(self) -> Callable[ - [vizier_service.DeleteTrialRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def check_trial_early_stopping_state(self) -> Callable[ - [vizier_service.CheckTrialEarlyStoppingStateRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def stop_trial(self) -> Callable[ - [vizier_service.StopTrialRequest], - Union[ - study.Trial, - Awaitable[study.Trial] - ]]: - raise NotImplementedError() - - @property - def list_optimal_trials(self) -> Callable[ - [vizier_service.ListOptimalTrialsRequest], - Union[ - vizier_service.ListOptimalTrialsResponse, - Awaitable[vizier_service.ListOptimalTrialsResponse] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'VizierServiceTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc.py deleted file mode 100644 index 5832fb75fd..0000000000 --- 
a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc.py +++ /dev/null @@ -1,659 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1.types import study -from google.cloud.aiplatform_v1.types import study as gca_study -from google.cloud.aiplatform_v1.types import vizier_service -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import VizierServiceTransport, DEFAULT_CLIENT_INFO - - -class VizierServiceGrpcTransport(VizierServiceTransport): - """gRPC backend transport for VizierService. - - Vertex AI Vizier API. - Vertex AI Vizier is a service to solve blackbox optimization - problems, such as tuning machine learning hyperparameters and - searching over deep learning architectures. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. 
- - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. 
A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. 
- self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_study(self) -> Callable[ - [vizier_service.CreateStudyRequest], - gca_study.Study]: - r"""Return a callable for the create study method over gRPC. 
- - Creates a Study. A resource name will be generated - after creation of the Study. - - Returns: - Callable[[~.CreateStudyRequest], - ~.Study]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_study' not in self._stubs: - self._stubs['create_study'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/CreateStudy', - request_serializer=vizier_service.CreateStudyRequest.serialize, - response_deserializer=gca_study.Study.deserialize, - ) - return self._stubs['create_study'] - - @property - def get_study(self) -> Callable[ - [vizier_service.GetStudyRequest], - study.Study]: - r"""Return a callable for the get study method over gRPC. - - Gets a Study by name. - - Returns: - Callable[[~.GetStudyRequest], - ~.Study]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_study' not in self._stubs: - self._stubs['get_study'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/GetStudy', - request_serializer=vizier_service.GetStudyRequest.serialize, - response_deserializer=study.Study.deserialize, - ) - return self._stubs['get_study'] - - @property - def list_studies(self) -> Callable[ - [vizier_service.ListStudiesRequest], - vizier_service.ListStudiesResponse]: - r"""Return a callable for the list studies method over gRPC. - - Lists all the studies in a region for an associated - project. - - Returns: - Callable[[~.ListStudiesRequest], - ~.ListStudiesResponse]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_studies' not in self._stubs: - self._stubs['list_studies'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/ListStudies', - request_serializer=vizier_service.ListStudiesRequest.serialize, - response_deserializer=vizier_service.ListStudiesResponse.deserialize, - ) - return self._stubs['list_studies'] - - @property - def delete_study(self) -> Callable[ - [vizier_service.DeleteStudyRequest], - empty_pb2.Empty]: - r"""Return a callable for the delete study method over gRPC. - - Deletes a Study. - - Returns: - Callable[[~.DeleteStudyRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_study' not in self._stubs: - self._stubs['delete_study'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/DeleteStudy', - request_serializer=vizier_service.DeleteStudyRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_study'] - - @property - def lookup_study(self) -> Callable[ - [vizier_service.LookupStudyRequest], - study.Study]: - r"""Return a callable for the lookup study method over gRPC. - - Looks a study up using the user-defined display_name field - instead of the fully qualified resource name. - - Returns: - Callable[[~.LookupStudyRequest], - ~.Study]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'lookup_study' not in self._stubs: - self._stubs['lookup_study'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/LookupStudy', - request_serializer=vizier_service.LookupStudyRequest.serialize, - response_deserializer=study.Study.deserialize, - ) - return self._stubs['lookup_study'] - - @property - def suggest_trials(self) -> Callable[ - [vizier_service.SuggestTrialsRequest], - operations_pb2.Operation]: - r"""Return a callable for the suggest trials method over gRPC. - - Adds one or more Trials to a Study, with parameter values - suggested by Vertex AI Vizier. Returns a long-running operation - associated with the generation of Trial suggestions. When this - long-running operation succeeds, it will contain a - [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. - - Returns: - Callable[[~.SuggestTrialsRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'suggest_trials' not in self._stubs: - self._stubs['suggest_trials'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/SuggestTrials', - request_serializer=vizier_service.SuggestTrialsRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['suggest_trials'] - - @property - def create_trial(self) -> Callable[ - [vizier_service.CreateTrialRequest], - study.Trial]: - r"""Return a callable for the create trial method over gRPC. - - Adds a user provided Trial to a Study. - - Returns: - Callable[[~.CreateTrialRequest], - ~.Trial]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_trial' not in self._stubs: - self._stubs['create_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/CreateTrial', - request_serializer=vizier_service.CreateTrialRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['create_trial'] - - @property - def get_trial(self) -> Callable[ - [vizier_service.GetTrialRequest], - study.Trial]: - r"""Return a callable for the get trial method over gRPC. - - Gets a Trial. - - Returns: - Callable[[~.GetTrialRequest], - ~.Trial]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_trial' not in self._stubs: - self._stubs['get_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/GetTrial', - request_serializer=vizier_service.GetTrialRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['get_trial'] - - @property - def list_trials(self) -> Callable[ - [vizier_service.ListTrialsRequest], - vizier_service.ListTrialsResponse]: - r"""Return a callable for the list trials method over gRPC. - - Lists the Trials associated with a Study. - - Returns: - Callable[[~.ListTrialsRequest], - ~.ListTrialsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_trials' not in self._stubs: - self._stubs['list_trials'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/ListTrials', - request_serializer=vizier_service.ListTrialsRequest.serialize, - response_deserializer=vizier_service.ListTrialsResponse.deserialize, - ) - return self._stubs['list_trials'] - - @property - def add_trial_measurement(self) -> Callable[ - [vizier_service.AddTrialMeasurementRequest], - study.Trial]: - r"""Return a callable for the add trial measurement method over gRPC. - - Adds a measurement of the objective metrics to a - Trial. This measurement is assumed to have been taken - before the Trial is complete. - - Returns: - Callable[[~.AddTrialMeasurementRequest], - ~.Trial]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'add_trial_measurement' not in self._stubs: - self._stubs['add_trial_measurement'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/AddTrialMeasurement', - request_serializer=vizier_service.AddTrialMeasurementRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['add_trial_measurement'] - - @property - def complete_trial(self) -> Callable[ - [vizier_service.CompleteTrialRequest], - study.Trial]: - r"""Return a callable for the complete trial method over gRPC. - - Marks a Trial as complete. - - Returns: - Callable[[~.CompleteTrialRequest], - ~.Trial]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'complete_trial' not in self._stubs: - self._stubs['complete_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/CompleteTrial', - request_serializer=vizier_service.CompleteTrialRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['complete_trial'] - - @property - def delete_trial(self) -> Callable[ - [vizier_service.DeleteTrialRequest], - empty_pb2.Empty]: - r"""Return a callable for the delete trial method over gRPC. - - Deletes a Trial. - - Returns: - Callable[[~.DeleteTrialRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_trial' not in self._stubs: - self._stubs['delete_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/DeleteTrial', - request_serializer=vizier_service.DeleteTrialRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_trial'] - - @property - def check_trial_early_stopping_state(self) -> Callable[ - [vizier_service.CheckTrialEarlyStoppingStateRequest], - operations_pb2.Operation]: - r"""Return a callable for the check trial early stopping - state method over gRPC. - - Checks whether a Trial should stop or not. Returns a - long-running operation. When the operation is successful, it - will contain a - [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. - - Returns: - Callable[[~.CheckTrialEarlyStoppingStateRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'check_trial_early_stopping_state' not in self._stubs: - self._stubs['check_trial_early_stopping_state'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/CheckTrialEarlyStoppingState', - request_serializer=vizier_service.CheckTrialEarlyStoppingStateRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['check_trial_early_stopping_state'] - - @property - def stop_trial(self) -> Callable[ - [vizier_service.StopTrialRequest], - study.Trial]: - r"""Return a callable for the stop trial method over gRPC. - - Stops a Trial. - - Returns: - Callable[[~.StopTrialRequest], - ~.Trial]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'stop_trial' not in self._stubs: - self._stubs['stop_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/StopTrial', - request_serializer=vizier_service.StopTrialRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['stop_trial'] - - @property - def list_optimal_trials(self) -> Callable[ - [vizier_service.ListOptimalTrialsRequest], - vizier_service.ListOptimalTrialsResponse]: - r"""Return a callable for the list optimal trials method over gRPC. - - Lists the pareto-optimal Trials for multi-objective Study or the - optimal Trials for single-objective Study. The definition of - pareto-optimal can be checked in wiki page. - https://en.wikipedia.org/wiki/Pareto_efficiency - - Returns: - Callable[[~.ListOptimalTrialsRequest], - ~.ListOptimalTrialsResponse]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_optimal_trials' not in self._stubs: - self._stubs['list_optimal_trials'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/ListOptimalTrials', - request_serializer=vizier_service.ListOptimalTrialsRequest.serialize, - response_deserializer=vizier_service.ListOptimalTrialsResponse.deserialize, - ) - return self._stubs['list_optimal_trials'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'VizierServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc_asyncio.py deleted file mode 100644 index 09183c2bd4..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/services/vizier_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,663 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1.types import study -from google.cloud.aiplatform_v1.types import study as gca_study -from google.cloud.aiplatform_v1.types import vizier_service -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import VizierServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import VizierServiceGrpcTransport - - -class VizierServiceGrpcAsyncIOTransport(VizierServiceTransport): - """gRPC AsyncIO backend transport for VizierService. - - Vertex AI Vizier API. - Vertex AI Vizier is a service to solve blackbox optimization - problems, such as tuning machine learning hyperparameters and - searching over deep learning architectures. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. 
- credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. 
- always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. 
- if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_study(self) -> Callable[ - [vizier_service.CreateStudyRequest], - Awaitable[gca_study.Study]]: - r"""Return a callable for the create study method over gRPC. - - Creates a Study. A resource name will be generated - after creation of the Study. - - Returns: - Callable[[~.CreateStudyRequest], - Awaitable[~.Study]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_study' not in self._stubs: - self._stubs['create_study'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/CreateStudy', - request_serializer=vizier_service.CreateStudyRequest.serialize, - response_deserializer=gca_study.Study.deserialize, - ) - return self._stubs['create_study'] - - @property - def get_study(self) -> Callable[ - [vizier_service.GetStudyRequest], - Awaitable[study.Study]]: - r"""Return a callable for the get study method over gRPC. - - Gets a Study by name. - - Returns: - Callable[[~.GetStudyRequest], - Awaitable[~.Study]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_study' not in self._stubs: - self._stubs['get_study'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/GetStudy', - request_serializer=vizier_service.GetStudyRequest.serialize, - response_deserializer=study.Study.deserialize, - ) - return self._stubs['get_study'] - - @property - def list_studies(self) -> Callable[ - [vizier_service.ListStudiesRequest], - Awaitable[vizier_service.ListStudiesResponse]]: - r"""Return a callable for the list studies method over gRPC. - - Lists all the studies in a region for an associated - project. - - Returns: - Callable[[~.ListStudiesRequest], - Awaitable[~.ListStudiesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_studies' not in self._stubs: - self._stubs['list_studies'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/ListStudies', - request_serializer=vizier_service.ListStudiesRequest.serialize, - response_deserializer=vizier_service.ListStudiesResponse.deserialize, - ) - return self._stubs['list_studies'] - - @property - def delete_study(self) -> Callable[ - [vizier_service.DeleteStudyRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the delete study method over gRPC. - - Deletes a Study. - - Returns: - Callable[[~.DeleteStudyRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_study' not in self._stubs: - self._stubs['delete_study'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/DeleteStudy', - request_serializer=vizier_service.DeleteStudyRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_study'] - - @property - def lookup_study(self) -> Callable[ - [vizier_service.LookupStudyRequest], - Awaitable[study.Study]]: - r"""Return a callable for the lookup study method over gRPC. - - Looks a study up using the user-defined display_name field - instead of the fully qualified resource name. - - Returns: - Callable[[~.LookupStudyRequest], - Awaitable[~.Study]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'lookup_study' not in self._stubs: - self._stubs['lookup_study'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/LookupStudy', - request_serializer=vizier_service.LookupStudyRequest.serialize, - response_deserializer=study.Study.deserialize, - ) - return self._stubs['lookup_study'] - - @property - def suggest_trials(self) -> Callable[ - [vizier_service.SuggestTrialsRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the suggest trials method over gRPC. - - Adds one or more Trials to a Study, with parameter values - suggested by Vertex AI Vizier. Returns a long-running operation - associated with the generation of Trial suggestions. When this - long-running operation succeeds, it will contain a - [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. - - Returns: - Callable[[~.SuggestTrialsRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'suggest_trials' not in self._stubs: - self._stubs['suggest_trials'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/SuggestTrials', - request_serializer=vizier_service.SuggestTrialsRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['suggest_trials'] - - @property - def create_trial(self) -> Callable[ - [vizier_service.CreateTrialRequest], - Awaitable[study.Trial]]: - r"""Return a callable for the create trial method over gRPC. - - Adds a user provided Trial to a Study. - - Returns: - Callable[[~.CreateTrialRequest], - Awaitable[~.Trial]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_trial' not in self._stubs: - self._stubs['create_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/CreateTrial', - request_serializer=vizier_service.CreateTrialRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['create_trial'] - - @property - def get_trial(self) -> Callable[ - [vizier_service.GetTrialRequest], - Awaitable[study.Trial]]: - r"""Return a callable for the get trial method over gRPC. - - Gets a Trial. - - Returns: - Callable[[~.GetTrialRequest], - Awaitable[~.Trial]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_trial' not in self._stubs: - self._stubs['get_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/GetTrial', - request_serializer=vizier_service.GetTrialRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['get_trial'] - - @property - def list_trials(self) -> Callable[ - [vizier_service.ListTrialsRequest], - Awaitable[vizier_service.ListTrialsResponse]]: - r"""Return a callable for the list trials method over gRPC. - - Lists the Trials associated with a Study. - - Returns: - Callable[[~.ListTrialsRequest], - Awaitable[~.ListTrialsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_trials' not in self._stubs: - self._stubs['list_trials'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/ListTrials', - request_serializer=vizier_service.ListTrialsRequest.serialize, - response_deserializer=vizier_service.ListTrialsResponse.deserialize, - ) - return self._stubs['list_trials'] - - @property - def add_trial_measurement(self) -> Callable[ - [vizier_service.AddTrialMeasurementRequest], - Awaitable[study.Trial]]: - r"""Return a callable for the add trial measurement method over gRPC. - - Adds a measurement of the objective metrics to a - Trial. This measurement is assumed to have been taken - before the Trial is complete. - - Returns: - Callable[[~.AddTrialMeasurementRequest], - Awaitable[~.Trial]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'add_trial_measurement' not in self._stubs: - self._stubs['add_trial_measurement'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/AddTrialMeasurement', - request_serializer=vizier_service.AddTrialMeasurementRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['add_trial_measurement'] - - @property - def complete_trial(self) -> Callable[ - [vizier_service.CompleteTrialRequest], - Awaitable[study.Trial]]: - r"""Return a callable for the complete trial method over gRPC. - - Marks a Trial as complete. - - Returns: - Callable[[~.CompleteTrialRequest], - Awaitable[~.Trial]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'complete_trial' not in self._stubs: - self._stubs['complete_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/CompleteTrial', - request_serializer=vizier_service.CompleteTrialRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['complete_trial'] - - @property - def delete_trial(self) -> Callable[ - [vizier_service.DeleteTrialRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the delete trial method over gRPC. - - Deletes a Trial. - - Returns: - Callable[[~.DeleteTrialRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_trial' not in self._stubs: - self._stubs['delete_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/DeleteTrial', - request_serializer=vizier_service.DeleteTrialRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_trial'] - - @property - def check_trial_early_stopping_state(self) -> Callable[ - [vizier_service.CheckTrialEarlyStoppingStateRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the check trial early stopping - state method over gRPC. - - Checks whether a Trial should stop or not. Returns a - long-running operation. When the operation is successful, it - will contain a - [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. - - Returns: - Callable[[~.CheckTrialEarlyStoppingStateRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'check_trial_early_stopping_state' not in self._stubs: - self._stubs['check_trial_early_stopping_state'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/CheckTrialEarlyStoppingState', - request_serializer=vizier_service.CheckTrialEarlyStoppingStateRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['check_trial_early_stopping_state'] - - @property - def stop_trial(self) -> Callable[ - [vizier_service.StopTrialRequest], - Awaitable[study.Trial]]: - r"""Return a callable for the stop trial method over gRPC. - - Stops a Trial. - - Returns: - Callable[[~.StopTrialRequest], - Awaitable[~.Trial]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'stop_trial' not in self._stubs: - self._stubs['stop_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/StopTrial', - request_serializer=vizier_service.StopTrialRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['stop_trial'] - - @property - def list_optimal_trials(self) -> Callable[ - [vizier_service.ListOptimalTrialsRequest], - Awaitable[vizier_service.ListOptimalTrialsResponse]]: - r"""Return a callable for the list optimal trials method over gRPC. - - Lists the pareto-optimal Trials for multi-objective Study or the - optimal Trials for single-objective Study. The definition of - pareto-optimal can be checked in wiki page. - https://en.wikipedia.org/wiki/Pareto_efficiency - - Returns: - Callable[[~.ListOptimalTrialsRequest], - Awaitable[~.ListOptimalTrialsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_optimal_trials' not in self._stubs: - self._stubs['list_optimal_trials'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1.VizierService/ListOptimalTrials', - request_serializer=vizier_service.ListOptimalTrialsRequest.serialize, - response_deserializer=vizier_service.ListOptimalTrialsResponse.deserialize, - ) - return self._stubs['list_optimal_trials'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'VizierServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/__init__.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/__init__.py deleted file mode 100644 index 5938f5bc53..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/__init__.py +++ /dev/null @@ -1,999 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .annotation import ( - Annotation, -) -from .annotation_spec import ( - AnnotationSpec, -) -from .artifact import ( - Artifact, -) -from .batch_prediction_job import ( - BatchPredictionJob, -) -from .completion_stats import ( - CompletionStats, -) -from .context import ( - Context, -) -from .custom_job import ( - ContainerSpec, - CustomJob, - CustomJobSpec, - PythonPackageSpec, - Scheduling, - WorkerPoolSpec, -) -from .data_item import ( - DataItem, -) -from .data_labeling_job import ( - ActiveLearningConfig, - DataLabelingJob, - SampleConfig, - TrainingConfig, -) -from .dataset import ( - Dataset, - ExportDataConfig, - ImportDataConfig, -) -from .dataset_service import ( - CreateDatasetOperationMetadata, - CreateDatasetRequest, - DeleteDatasetRequest, - ExportDataOperationMetadata, - ExportDataRequest, - ExportDataResponse, - GetAnnotationSpecRequest, - GetDatasetRequest, - ImportDataOperationMetadata, - ImportDataRequest, - ImportDataResponse, - ListAnnotationsRequest, - ListAnnotationsResponse, - ListDataItemsRequest, - ListDataItemsResponse, - ListDatasetsRequest, - ListDatasetsResponse, - UpdateDatasetRequest, -) -from .deployed_index_ref import ( - DeployedIndexRef, -) -from .deployed_model_ref import ( - DeployedModelRef, -) -from .encryption_spec import ( - EncryptionSpec, -) -from .endpoint import ( - DeployedModel, - Endpoint, - PrivateEndpoints, -) -from .endpoint_service import ( - CreateEndpointOperationMetadata, - CreateEndpointRequest, - DeleteEndpointRequest, - DeployModelOperationMetadata, - DeployModelRequest, - DeployModelResponse, - GetEndpointRequest, - ListEndpointsRequest, - ListEndpointsResponse, - UndeployModelOperationMetadata, - UndeployModelRequest, - UndeployModelResponse, - UpdateEndpointRequest, -) -from .entity_type import ( - EntityType, -) -from .env_var import ( - EnvVar, -) -from .event import ( - Event, -) -from .execution import ( - Execution, -) -from .explanation import ( - Attribution, - BlurBaselineConfig, - 
Explanation, - ExplanationMetadataOverride, - ExplanationParameters, - ExplanationSpec, - ExplanationSpecOverride, - FeatureNoiseSigma, - IntegratedGradientsAttribution, - ModelExplanation, - SampledShapleyAttribution, - SmoothGradConfig, - XraiAttribution, -) -from .explanation_metadata import ( - ExplanationMetadata, -) -from .feature import ( - Feature, -) -from .feature_monitoring_stats import ( - FeatureStatsAnomaly, -) -from .feature_selector import ( - FeatureSelector, - IdMatcher, -) -from .featurestore import ( - Featurestore, -) -from .featurestore_online_service import ( - FeatureValue, - FeatureValueList, - ReadFeatureValuesRequest, - ReadFeatureValuesResponse, - StreamingReadFeatureValuesRequest, -) -from .featurestore_service import ( - BatchCreateFeaturesOperationMetadata, - BatchCreateFeaturesRequest, - BatchCreateFeaturesResponse, - BatchReadFeatureValuesOperationMetadata, - BatchReadFeatureValuesRequest, - BatchReadFeatureValuesResponse, - CreateEntityTypeOperationMetadata, - CreateEntityTypeRequest, - CreateFeatureOperationMetadata, - CreateFeatureRequest, - CreateFeaturestoreOperationMetadata, - CreateFeaturestoreRequest, - DeleteEntityTypeRequest, - DeleteFeatureRequest, - DeleteFeaturestoreRequest, - DestinationFeatureSetting, - ExportFeatureValuesOperationMetadata, - ExportFeatureValuesRequest, - ExportFeatureValuesResponse, - FeatureValueDestination, - GetEntityTypeRequest, - GetFeatureRequest, - GetFeaturestoreRequest, - ImportFeatureValuesOperationMetadata, - ImportFeatureValuesRequest, - ImportFeatureValuesResponse, - ListEntityTypesRequest, - ListEntityTypesResponse, - ListFeaturesRequest, - ListFeaturesResponse, - ListFeaturestoresRequest, - ListFeaturestoresResponse, - SearchFeaturesRequest, - SearchFeaturesResponse, - UpdateEntityTypeRequest, - UpdateFeatureRequest, - UpdateFeaturestoreOperationMetadata, - UpdateFeaturestoreRequest, -) -from .hyperparameter_tuning_job import ( - HyperparameterTuningJob, -) -from .index import ( - 
Index, -) -from .index_endpoint import ( - DeployedIndex, - DeployedIndexAuthConfig, - IndexEndpoint, - IndexPrivateEndpoints, -) -from .index_endpoint_service import ( - CreateIndexEndpointOperationMetadata, - CreateIndexEndpointRequest, - DeleteIndexEndpointRequest, - DeployIndexOperationMetadata, - DeployIndexRequest, - DeployIndexResponse, - GetIndexEndpointRequest, - ListIndexEndpointsRequest, - ListIndexEndpointsResponse, - MutateDeployedIndexOperationMetadata, - MutateDeployedIndexRequest, - MutateDeployedIndexResponse, - UndeployIndexOperationMetadata, - UndeployIndexRequest, - UndeployIndexResponse, - UpdateIndexEndpointRequest, -) -from .index_service import ( - CreateIndexOperationMetadata, - CreateIndexRequest, - DeleteIndexRequest, - GetIndexRequest, - ListIndexesRequest, - ListIndexesResponse, - NearestNeighborSearchOperationMetadata, - UpdateIndexOperationMetadata, - UpdateIndexRequest, -) -from .io import ( - AvroSource, - BigQueryDestination, - BigQuerySource, - ContainerRegistryDestination, - CsvDestination, - CsvSource, - GcsDestination, - GcsSource, - TFRecordDestination, -) -from .job_service import ( - CancelBatchPredictionJobRequest, - CancelCustomJobRequest, - CancelDataLabelingJobRequest, - CancelHyperparameterTuningJobRequest, - CreateBatchPredictionJobRequest, - CreateCustomJobRequest, - CreateDataLabelingJobRequest, - CreateHyperparameterTuningJobRequest, - CreateModelDeploymentMonitoringJobRequest, - DeleteBatchPredictionJobRequest, - DeleteCustomJobRequest, - DeleteDataLabelingJobRequest, - DeleteHyperparameterTuningJobRequest, - DeleteModelDeploymentMonitoringJobRequest, - GetBatchPredictionJobRequest, - GetCustomJobRequest, - GetDataLabelingJobRequest, - GetHyperparameterTuningJobRequest, - GetModelDeploymentMonitoringJobRequest, - ListBatchPredictionJobsRequest, - ListBatchPredictionJobsResponse, - ListCustomJobsRequest, - ListCustomJobsResponse, - ListDataLabelingJobsRequest, - ListDataLabelingJobsResponse, - 
ListHyperparameterTuningJobsRequest, - ListHyperparameterTuningJobsResponse, - ListModelDeploymentMonitoringJobsRequest, - ListModelDeploymentMonitoringJobsResponse, - PauseModelDeploymentMonitoringJobRequest, - ResumeModelDeploymentMonitoringJobRequest, - SearchModelDeploymentMonitoringStatsAnomaliesRequest, - SearchModelDeploymentMonitoringStatsAnomaliesResponse, - UpdateModelDeploymentMonitoringJobOperationMetadata, - UpdateModelDeploymentMonitoringJobRequest, -) -from .lineage_subgraph import ( - LineageSubgraph, -) -from .machine_resources import ( - AutomaticResources, - AutoscalingMetricSpec, - BatchDedicatedResources, - DedicatedResources, - DiskSpec, - MachineSpec, - ResourcesConsumed, -) -from .manual_batch_tuning_parameters import ( - ManualBatchTuningParameters, -) -from .metadata_schema import ( - MetadataSchema, -) -from .metadata_service import ( - AddContextArtifactsAndExecutionsRequest, - AddContextArtifactsAndExecutionsResponse, - AddContextChildrenRequest, - AddContextChildrenResponse, - AddExecutionEventsRequest, - AddExecutionEventsResponse, - CreateArtifactRequest, - CreateContextRequest, - CreateExecutionRequest, - CreateMetadataSchemaRequest, - CreateMetadataStoreOperationMetadata, - CreateMetadataStoreRequest, - DeleteArtifactRequest, - DeleteContextRequest, - DeleteExecutionRequest, - DeleteMetadataStoreOperationMetadata, - DeleteMetadataStoreRequest, - GetArtifactRequest, - GetContextRequest, - GetExecutionRequest, - GetMetadataSchemaRequest, - GetMetadataStoreRequest, - ListArtifactsRequest, - ListArtifactsResponse, - ListContextsRequest, - ListContextsResponse, - ListExecutionsRequest, - ListExecutionsResponse, - ListMetadataSchemasRequest, - ListMetadataSchemasResponse, - ListMetadataStoresRequest, - ListMetadataStoresResponse, - PurgeArtifactsMetadata, - PurgeArtifactsRequest, - PurgeArtifactsResponse, - PurgeContextsMetadata, - PurgeContextsRequest, - PurgeContextsResponse, - PurgeExecutionsMetadata, - PurgeExecutionsRequest, - 
PurgeExecutionsResponse, - QueryArtifactLineageSubgraphRequest, - QueryContextLineageSubgraphRequest, - QueryExecutionInputsAndOutputsRequest, - UpdateArtifactRequest, - UpdateContextRequest, - UpdateExecutionRequest, -) -from .metadata_store import ( - MetadataStore, -) -from .migratable_resource import ( - MigratableResource, -) -from .migration_service import ( - BatchMigrateResourcesOperationMetadata, - BatchMigrateResourcesRequest, - BatchMigrateResourcesResponse, - MigrateResourceRequest, - MigrateResourceResponse, - SearchMigratableResourcesRequest, - SearchMigratableResourcesResponse, -) -from .model import ( - Model, - ModelContainerSpec, - Port, - PredictSchemata, -) -from .model_deployment_monitoring_job import ( - ModelDeploymentMonitoringBigQueryTable, - ModelDeploymentMonitoringJob, - ModelDeploymentMonitoringObjectiveConfig, - ModelDeploymentMonitoringScheduleConfig, - ModelMonitoringStatsAnomalies, - ModelDeploymentMonitoringObjectiveType, -) -from .model_evaluation import ( - ModelEvaluation, -) -from .model_evaluation_slice import ( - ModelEvaluationSlice, -) -from .model_monitoring import ( - ModelMonitoringAlertConfig, - ModelMonitoringObjectiveConfig, - SamplingStrategy, - ThresholdConfig, -) -from .model_service import ( - DeleteModelRequest, - ExportModelOperationMetadata, - ExportModelRequest, - ExportModelResponse, - GetModelEvaluationRequest, - GetModelEvaluationSliceRequest, - GetModelRequest, - ListModelEvaluationSlicesRequest, - ListModelEvaluationSlicesResponse, - ListModelEvaluationsRequest, - ListModelEvaluationsResponse, - ListModelsRequest, - ListModelsResponse, - UpdateModelRequest, - UploadModelOperationMetadata, - UploadModelRequest, - UploadModelResponse, -) -from .operation import ( - DeleteOperationMetadata, - GenericOperationMetadata, -) -from .pipeline_job import ( - PipelineJob, - PipelineJobDetail, - PipelineTaskDetail, - PipelineTaskExecutorDetail, -) -from .pipeline_service import ( - CancelPipelineJobRequest, - 
CancelTrainingPipelineRequest, - CreatePipelineJobRequest, - CreateTrainingPipelineRequest, - DeletePipelineJobRequest, - DeleteTrainingPipelineRequest, - GetPipelineJobRequest, - GetTrainingPipelineRequest, - ListPipelineJobsRequest, - ListPipelineJobsResponse, - ListTrainingPipelinesRequest, - ListTrainingPipelinesResponse, -) -from .prediction_service import ( - ExplainRequest, - ExplainResponse, - PredictRequest, - PredictResponse, - RawPredictRequest, -) -from .specialist_pool import ( - SpecialistPool, -) -from .specialist_pool_service import ( - CreateSpecialistPoolOperationMetadata, - CreateSpecialistPoolRequest, - DeleteSpecialistPoolRequest, - GetSpecialistPoolRequest, - ListSpecialistPoolsRequest, - ListSpecialistPoolsResponse, - UpdateSpecialistPoolOperationMetadata, - UpdateSpecialistPoolRequest, -) -from .study import ( - Measurement, - Study, - StudySpec, - Trial, -) -from .tensorboard import ( - Tensorboard, -) -from .tensorboard_data import ( - Scalar, - TensorboardBlob, - TensorboardBlobSequence, - TensorboardTensor, - TimeSeriesData, - TimeSeriesDataPoint, -) -from .tensorboard_experiment import ( - TensorboardExperiment, -) -from .tensorboard_run import ( - TensorboardRun, -) -from .tensorboard_service import ( - BatchCreateTensorboardRunsRequest, - BatchCreateTensorboardRunsResponse, - BatchCreateTensorboardTimeSeriesRequest, - BatchCreateTensorboardTimeSeriesResponse, - BatchReadTensorboardTimeSeriesDataRequest, - BatchReadTensorboardTimeSeriesDataResponse, - CreateTensorboardExperimentRequest, - CreateTensorboardOperationMetadata, - CreateTensorboardRequest, - CreateTensorboardRunRequest, - CreateTensorboardTimeSeriesRequest, - DeleteTensorboardExperimentRequest, - DeleteTensorboardRequest, - DeleteTensorboardRunRequest, - DeleteTensorboardTimeSeriesRequest, - ExportTensorboardTimeSeriesDataRequest, - ExportTensorboardTimeSeriesDataResponse, - GetTensorboardExperimentRequest, - GetTensorboardRequest, - GetTensorboardRunRequest, - 
GetTensorboardTimeSeriesRequest, - ListTensorboardExperimentsRequest, - ListTensorboardExperimentsResponse, - ListTensorboardRunsRequest, - ListTensorboardRunsResponse, - ListTensorboardsRequest, - ListTensorboardsResponse, - ListTensorboardTimeSeriesRequest, - ListTensorboardTimeSeriesResponse, - ReadTensorboardBlobDataRequest, - ReadTensorboardBlobDataResponse, - ReadTensorboardTimeSeriesDataRequest, - ReadTensorboardTimeSeriesDataResponse, - UpdateTensorboardExperimentRequest, - UpdateTensorboardOperationMetadata, - UpdateTensorboardRequest, - UpdateTensorboardRunRequest, - UpdateTensorboardTimeSeriesRequest, - WriteTensorboardExperimentDataRequest, - WriteTensorboardExperimentDataResponse, - WriteTensorboardRunDataRequest, - WriteTensorboardRunDataResponse, -) -from .tensorboard_time_series import ( - TensorboardTimeSeries, -) -from .training_pipeline import ( - FilterSplit, - FractionSplit, - InputDataConfig, - PredefinedSplit, - StratifiedSplit, - TimestampSplit, - TrainingPipeline, -) -from .types import ( - BoolArray, - DoubleArray, - Int64Array, - StringArray, -) -from .unmanaged_container_model import ( - UnmanagedContainerModel, -) -from .user_action_reference import ( - UserActionReference, -) -from .value import ( - Value, -) -from .vizier_service import ( - AddTrialMeasurementRequest, - CheckTrialEarlyStoppingStateMetatdata, - CheckTrialEarlyStoppingStateRequest, - CheckTrialEarlyStoppingStateResponse, - CompleteTrialRequest, - CreateStudyRequest, - CreateTrialRequest, - DeleteStudyRequest, - DeleteTrialRequest, - GetStudyRequest, - GetTrialRequest, - ListOptimalTrialsRequest, - ListOptimalTrialsResponse, - ListStudiesRequest, - ListStudiesResponse, - ListTrialsRequest, - ListTrialsResponse, - LookupStudyRequest, - StopTrialRequest, - SuggestTrialsMetadata, - SuggestTrialsRequest, - SuggestTrialsResponse, -) - -__all__ = ( - 'AcceleratorType', - 'Annotation', - 'AnnotationSpec', - 'Artifact', - 'BatchPredictionJob', - 'CompletionStats', - 'Context', - 
'ContainerSpec', - 'CustomJob', - 'CustomJobSpec', - 'PythonPackageSpec', - 'Scheduling', - 'WorkerPoolSpec', - 'DataItem', - 'ActiveLearningConfig', - 'DataLabelingJob', - 'SampleConfig', - 'TrainingConfig', - 'Dataset', - 'ExportDataConfig', - 'ImportDataConfig', - 'CreateDatasetOperationMetadata', - 'CreateDatasetRequest', - 'DeleteDatasetRequest', - 'ExportDataOperationMetadata', - 'ExportDataRequest', - 'ExportDataResponse', - 'GetAnnotationSpecRequest', - 'GetDatasetRequest', - 'ImportDataOperationMetadata', - 'ImportDataRequest', - 'ImportDataResponse', - 'ListAnnotationsRequest', - 'ListAnnotationsResponse', - 'ListDataItemsRequest', - 'ListDataItemsResponse', - 'ListDatasetsRequest', - 'ListDatasetsResponse', - 'UpdateDatasetRequest', - 'DeployedIndexRef', - 'DeployedModelRef', - 'EncryptionSpec', - 'DeployedModel', - 'Endpoint', - 'PrivateEndpoints', - 'CreateEndpointOperationMetadata', - 'CreateEndpointRequest', - 'DeleteEndpointRequest', - 'DeployModelOperationMetadata', - 'DeployModelRequest', - 'DeployModelResponse', - 'GetEndpointRequest', - 'ListEndpointsRequest', - 'ListEndpointsResponse', - 'UndeployModelOperationMetadata', - 'UndeployModelRequest', - 'UndeployModelResponse', - 'UpdateEndpointRequest', - 'EntityType', - 'EnvVar', - 'Event', - 'Execution', - 'Attribution', - 'BlurBaselineConfig', - 'Explanation', - 'ExplanationMetadataOverride', - 'ExplanationParameters', - 'ExplanationSpec', - 'ExplanationSpecOverride', - 'FeatureNoiseSigma', - 'IntegratedGradientsAttribution', - 'ModelExplanation', - 'SampledShapleyAttribution', - 'SmoothGradConfig', - 'XraiAttribution', - 'ExplanationMetadata', - 'Feature', - 'FeatureStatsAnomaly', - 'FeatureSelector', - 'IdMatcher', - 'Featurestore', - 'FeatureValue', - 'FeatureValueList', - 'ReadFeatureValuesRequest', - 'ReadFeatureValuesResponse', - 'StreamingReadFeatureValuesRequest', - 'BatchCreateFeaturesOperationMetadata', - 'BatchCreateFeaturesRequest', - 'BatchCreateFeaturesResponse', - 
'BatchReadFeatureValuesOperationMetadata', - 'BatchReadFeatureValuesRequest', - 'BatchReadFeatureValuesResponse', - 'CreateEntityTypeOperationMetadata', - 'CreateEntityTypeRequest', - 'CreateFeatureOperationMetadata', - 'CreateFeatureRequest', - 'CreateFeaturestoreOperationMetadata', - 'CreateFeaturestoreRequest', - 'DeleteEntityTypeRequest', - 'DeleteFeatureRequest', - 'DeleteFeaturestoreRequest', - 'DestinationFeatureSetting', - 'ExportFeatureValuesOperationMetadata', - 'ExportFeatureValuesRequest', - 'ExportFeatureValuesResponse', - 'FeatureValueDestination', - 'GetEntityTypeRequest', - 'GetFeatureRequest', - 'GetFeaturestoreRequest', - 'ImportFeatureValuesOperationMetadata', - 'ImportFeatureValuesRequest', - 'ImportFeatureValuesResponse', - 'ListEntityTypesRequest', - 'ListEntityTypesResponse', - 'ListFeaturesRequest', - 'ListFeaturesResponse', - 'ListFeaturestoresRequest', - 'ListFeaturestoresResponse', - 'SearchFeaturesRequest', - 'SearchFeaturesResponse', - 'UpdateEntityTypeRequest', - 'UpdateFeatureRequest', - 'UpdateFeaturestoreOperationMetadata', - 'UpdateFeaturestoreRequest', - 'HyperparameterTuningJob', - 'Index', - 'DeployedIndex', - 'DeployedIndexAuthConfig', - 'IndexEndpoint', - 'IndexPrivateEndpoints', - 'CreateIndexEndpointOperationMetadata', - 'CreateIndexEndpointRequest', - 'DeleteIndexEndpointRequest', - 'DeployIndexOperationMetadata', - 'DeployIndexRequest', - 'DeployIndexResponse', - 'GetIndexEndpointRequest', - 'ListIndexEndpointsRequest', - 'ListIndexEndpointsResponse', - 'MutateDeployedIndexOperationMetadata', - 'MutateDeployedIndexRequest', - 'MutateDeployedIndexResponse', - 'UndeployIndexOperationMetadata', - 'UndeployIndexRequest', - 'UndeployIndexResponse', - 'UpdateIndexEndpointRequest', - 'CreateIndexOperationMetadata', - 'CreateIndexRequest', - 'DeleteIndexRequest', - 'GetIndexRequest', - 'ListIndexesRequest', - 'ListIndexesResponse', - 'NearestNeighborSearchOperationMetadata', - 'UpdateIndexOperationMetadata', - 
'UpdateIndexRequest', - 'AvroSource', - 'BigQueryDestination', - 'BigQuerySource', - 'ContainerRegistryDestination', - 'CsvDestination', - 'CsvSource', - 'GcsDestination', - 'GcsSource', - 'TFRecordDestination', - 'CancelBatchPredictionJobRequest', - 'CancelCustomJobRequest', - 'CancelDataLabelingJobRequest', - 'CancelHyperparameterTuningJobRequest', - 'CreateBatchPredictionJobRequest', - 'CreateCustomJobRequest', - 'CreateDataLabelingJobRequest', - 'CreateHyperparameterTuningJobRequest', - 'CreateModelDeploymentMonitoringJobRequest', - 'DeleteBatchPredictionJobRequest', - 'DeleteCustomJobRequest', - 'DeleteDataLabelingJobRequest', - 'DeleteHyperparameterTuningJobRequest', - 'DeleteModelDeploymentMonitoringJobRequest', - 'GetBatchPredictionJobRequest', - 'GetCustomJobRequest', - 'GetDataLabelingJobRequest', - 'GetHyperparameterTuningJobRequest', - 'GetModelDeploymentMonitoringJobRequest', - 'ListBatchPredictionJobsRequest', - 'ListBatchPredictionJobsResponse', - 'ListCustomJobsRequest', - 'ListCustomJobsResponse', - 'ListDataLabelingJobsRequest', - 'ListDataLabelingJobsResponse', - 'ListHyperparameterTuningJobsRequest', - 'ListHyperparameterTuningJobsResponse', - 'ListModelDeploymentMonitoringJobsRequest', - 'ListModelDeploymentMonitoringJobsResponse', - 'PauseModelDeploymentMonitoringJobRequest', - 'ResumeModelDeploymentMonitoringJobRequest', - 'SearchModelDeploymentMonitoringStatsAnomaliesRequest', - 'SearchModelDeploymentMonitoringStatsAnomaliesResponse', - 'UpdateModelDeploymentMonitoringJobOperationMetadata', - 'UpdateModelDeploymentMonitoringJobRequest', - 'JobState', - 'LineageSubgraph', - 'AutomaticResources', - 'AutoscalingMetricSpec', - 'BatchDedicatedResources', - 'DedicatedResources', - 'DiskSpec', - 'MachineSpec', - 'ResourcesConsumed', - 'ManualBatchTuningParameters', - 'MetadataSchema', - 'AddContextArtifactsAndExecutionsRequest', - 'AddContextArtifactsAndExecutionsResponse', - 'AddContextChildrenRequest', - 'AddContextChildrenResponse', - 
'AddExecutionEventsRequest', - 'AddExecutionEventsResponse', - 'CreateArtifactRequest', - 'CreateContextRequest', - 'CreateExecutionRequest', - 'CreateMetadataSchemaRequest', - 'CreateMetadataStoreOperationMetadata', - 'CreateMetadataStoreRequest', - 'DeleteArtifactRequest', - 'DeleteContextRequest', - 'DeleteExecutionRequest', - 'DeleteMetadataStoreOperationMetadata', - 'DeleteMetadataStoreRequest', - 'GetArtifactRequest', - 'GetContextRequest', - 'GetExecutionRequest', - 'GetMetadataSchemaRequest', - 'GetMetadataStoreRequest', - 'ListArtifactsRequest', - 'ListArtifactsResponse', - 'ListContextsRequest', - 'ListContextsResponse', - 'ListExecutionsRequest', - 'ListExecutionsResponse', - 'ListMetadataSchemasRequest', - 'ListMetadataSchemasResponse', - 'ListMetadataStoresRequest', - 'ListMetadataStoresResponse', - 'PurgeArtifactsMetadata', - 'PurgeArtifactsRequest', - 'PurgeArtifactsResponse', - 'PurgeContextsMetadata', - 'PurgeContextsRequest', - 'PurgeContextsResponse', - 'PurgeExecutionsMetadata', - 'PurgeExecutionsRequest', - 'PurgeExecutionsResponse', - 'QueryArtifactLineageSubgraphRequest', - 'QueryContextLineageSubgraphRequest', - 'QueryExecutionInputsAndOutputsRequest', - 'UpdateArtifactRequest', - 'UpdateContextRequest', - 'UpdateExecutionRequest', - 'MetadataStore', - 'MigratableResource', - 'BatchMigrateResourcesOperationMetadata', - 'BatchMigrateResourcesRequest', - 'BatchMigrateResourcesResponse', - 'MigrateResourceRequest', - 'MigrateResourceResponse', - 'SearchMigratableResourcesRequest', - 'SearchMigratableResourcesResponse', - 'Model', - 'ModelContainerSpec', - 'Port', - 'PredictSchemata', - 'ModelDeploymentMonitoringBigQueryTable', - 'ModelDeploymentMonitoringJob', - 'ModelDeploymentMonitoringObjectiveConfig', - 'ModelDeploymentMonitoringScheduleConfig', - 'ModelMonitoringStatsAnomalies', - 'ModelDeploymentMonitoringObjectiveType', - 'ModelEvaluation', - 'ModelEvaluationSlice', - 'ModelMonitoringAlertConfig', - 'ModelMonitoringObjectiveConfig', - 
'SamplingStrategy', - 'ThresholdConfig', - 'DeleteModelRequest', - 'ExportModelOperationMetadata', - 'ExportModelRequest', - 'ExportModelResponse', - 'GetModelEvaluationRequest', - 'GetModelEvaluationSliceRequest', - 'GetModelRequest', - 'ListModelEvaluationSlicesRequest', - 'ListModelEvaluationSlicesResponse', - 'ListModelEvaluationsRequest', - 'ListModelEvaluationsResponse', - 'ListModelsRequest', - 'ListModelsResponse', - 'UpdateModelRequest', - 'UploadModelOperationMetadata', - 'UploadModelRequest', - 'UploadModelResponse', - 'DeleteOperationMetadata', - 'GenericOperationMetadata', - 'PipelineJob', - 'PipelineJobDetail', - 'PipelineTaskDetail', - 'PipelineTaskExecutorDetail', - 'CancelPipelineJobRequest', - 'CancelTrainingPipelineRequest', - 'CreatePipelineJobRequest', - 'CreateTrainingPipelineRequest', - 'DeletePipelineJobRequest', - 'DeleteTrainingPipelineRequest', - 'GetPipelineJobRequest', - 'GetTrainingPipelineRequest', - 'ListPipelineJobsRequest', - 'ListPipelineJobsResponse', - 'ListTrainingPipelinesRequest', - 'ListTrainingPipelinesResponse', - 'PipelineState', - 'ExplainRequest', - 'ExplainResponse', - 'PredictRequest', - 'PredictResponse', - 'RawPredictRequest', - 'SpecialistPool', - 'CreateSpecialistPoolOperationMetadata', - 'CreateSpecialistPoolRequest', - 'DeleteSpecialistPoolRequest', - 'GetSpecialistPoolRequest', - 'ListSpecialistPoolsRequest', - 'ListSpecialistPoolsResponse', - 'UpdateSpecialistPoolOperationMetadata', - 'UpdateSpecialistPoolRequest', - 'Measurement', - 'Study', - 'StudySpec', - 'Trial', - 'Tensorboard', - 'Scalar', - 'TensorboardBlob', - 'TensorboardBlobSequence', - 'TensorboardTensor', - 'TimeSeriesData', - 'TimeSeriesDataPoint', - 'TensorboardExperiment', - 'TensorboardRun', - 'BatchCreateTensorboardRunsRequest', - 'BatchCreateTensorboardRunsResponse', - 'BatchCreateTensorboardTimeSeriesRequest', - 'BatchCreateTensorboardTimeSeriesResponse', - 'BatchReadTensorboardTimeSeriesDataRequest', - 
'BatchReadTensorboardTimeSeriesDataResponse', - 'CreateTensorboardExperimentRequest', - 'CreateTensorboardOperationMetadata', - 'CreateTensorboardRequest', - 'CreateTensorboardRunRequest', - 'CreateTensorboardTimeSeriesRequest', - 'DeleteTensorboardExperimentRequest', - 'DeleteTensorboardRequest', - 'DeleteTensorboardRunRequest', - 'DeleteTensorboardTimeSeriesRequest', - 'ExportTensorboardTimeSeriesDataRequest', - 'ExportTensorboardTimeSeriesDataResponse', - 'GetTensorboardExperimentRequest', - 'GetTensorboardRequest', - 'GetTensorboardRunRequest', - 'GetTensorboardTimeSeriesRequest', - 'ListTensorboardExperimentsRequest', - 'ListTensorboardExperimentsResponse', - 'ListTensorboardRunsRequest', - 'ListTensorboardRunsResponse', - 'ListTensorboardsRequest', - 'ListTensorboardsResponse', - 'ListTensorboardTimeSeriesRequest', - 'ListTensorboardTimeSeriesResponse', - 'ReadTensorboardBlobDataRequest', - 'ReadTensorboardBlobDataResponse', - 'ReadTensorboardTimeSeriesDataRequest', - 'ReadTensorboardTimeSeriesDataResponse', - 'UpdateTensorboardExperimentRequest', - 'UpdateTensorboardOperationMetadata', - 'UpdateTensorboardRequest', - 'UpdateTensorboardRunRequest', - 'UpdateTensorboardTimeSeriesRequest', - 'WriteTensorboardExperimentDataRequest', - 'WriteTensorboardExperimentDataResponse', - 'WriteTensorboardRunDataRequest', - 'WriteTensorboardRunDataResponse', - 'TensorboardTimeSeries', - 'FilterSplit', - 'FractionSplit', - 'InputDataConfig', - 'PredefinedSplit', - 'StratifiedSplit', - 'TimestampSplit', - 'TrainingPipeline', - 'BoolArray', - 'DoubleArray', - 'Int64Array', - 'StringArray', - 'UnmanagedContainerModel', - 'UserActionReference', - 'Value', - 'AddTrialMeasurementRequest', - 'CheckTrialEarlyStoppingStateMetatdata', - 'CheckTrialEarlyStoppingStateRequest', - 'CheckTrialEarlyStoppingStateResponse', - 'CompleteTrialRequest', - 'CreateStudyRequest', - 'CreateTrialRequest', - 'DeleteStudyRequest', - 'DeleteTrialRequest', - 'GetStudyRequest', - 'GetTrialRequest', - 
'ListOptimalTrialsRequest', - 'ListOptimalTrialsResponse', - 'ListStudiesRequest', - 'ListStudiesResponse', - 'ListTrialsRequest', - 'ListTrialsResponse', - 'LookupStudyRequest', - 'StopTrialRequest', - 'SuggestTrialsMetadata', - 'SuggestTrialsRequest', - 'SuggestTrialsResponse', -) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/accelerator_type.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/accelerator_type.py deleted file mode 100644 index 2c89d89653..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/accelerator_type.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'AcceleratorType', - }, -) - - -class AcceleratorType(proto.Enum): - r"""Represents a hardware accelerator type.""" - ACCELERATOR_TYPE_UNSPECIFIED = 0 - NVIDIA_TESLA_K80 = 1 - NVIDIA_TESLA_P100 = 2 - NVIDIA_TESLA_V100 = 3 - NVIDIA_TESLA_P4 = 4 - NVIDIA_TESLA_T4 = 5 - NVIDIA_TESLA_A100 = 8 - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation.py deleted file mode 100644 index 342d33e976..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation.py +++ /dev/null @@ -1,129 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import user_action_reference -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'Annotation', - }, -) - - -class Annotation(proto.Message): - r"""Used to assign specific AnnotationSpec to a particular area - of a DataItem or the whole part of the DataItem. - - Attributes: - name (str): - Output only. Resource name of the Annotation. - payload_schema_uri (str): - Required. 
Google Cloud Storage URI points to a YAML file - describing - [payload][google.cloud.aiplatform.v1.Annotation.payload]. - The schema is defined as an `OpenAPI 3.0.2 Schema - Object `__. - The schema files that can be used here are found in - gs://google-cloud-aiplatform/schema/dataset/annotation/, - note that the chosen schema must be consistent with the - parent Dataset's - [metadata][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri]. - payload (google.protobuf.struct_pb2.Value): - Required. The schema of the payload can be found in - [payload_schema][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Annotation - was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Annotation - was last updated. - etag (str): - Optional. Used to perform consistent read- - odify-write updates. If not set, a blind - "overwrite" update happens. - annotation_source (google.cloud.aiplatform_v1.types.UserActionReference): - Output only. The source of the Annotation. - labels (Sequence[google.cloud.aiplatform_v1.types.Annotation.LabelsEntry]): - Optional. The labels with user-defined metadata to organize - your Annotations. - - Label keys and values can be no longer than 64 characters - (Unicode codepoints), can only contain lowercase letters, - numeric characters, underscores and dashes. International - characters are allowed. No more than 64 user labels can be - associated with one Annotation(System labels are excluded). - - See https://goo.gl/xmQnxf for more information and examples - of labels. System reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. Following - system labels exist for each Annotation: - - - "aiplatform.googleapis.com/annotation_set_name": - optional, name of the UI's annotation set this Annotation - belongs to. If not set, the Annotation is not visible in - the UI. 
- - - "aiplatform.googleapis.com/payload_schema": output only, - its value is the - [payload_schema's][google.cloud.aiplatform.v1.Annotation.payload_schema_uri] - title. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - payload_schema_uri = proto.Field( - proto.STRING, - number=2, - ) - payload = proto.Field( - proto.MESSAGE, - number=3, - message=struct_pb2.Value, - ) - create_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - etag = proto.Field( - proto.STRING, - number=8, - ) - annotation_source = proto.Field( - proto.MESSAGE, - number=5, - message=user_action_reference.UserActionReference, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=6, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation_spec.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation_spec.py deleted file mode 100644 index 950abfe6c4..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/annotation_spec.py +++ /dev/null @@ -1,78 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'AnnotationSpec', - }, -) - - -class AnnotationSpec(proto.Message): - r"""Identifies a concept with which DataItems may be annotated - with. - - Attributes: - name (str): - Output only. Resource name of the - AnnotationSpec. - display_name (str): - Required. The user-defined name of the - AnnotationSpec. The name can be up to 128 - characters long and can be consist of any UTF-8 - characters. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - AnnotationSpec was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when AnnotationSpec - was last updated. - etag (str): - Optional. Used to perform consistent read- - odify-write updates. If not set, a blind - "overwrite" update happens. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - create_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - etag = proto.Field( - proto.STRING, - number=5, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/artifact.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/artifact.py deleted file mode 100644 index 8ef3d009d8..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/artifact.py +++ /dev/null @@ -1,153 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'Artifact', - }, -) - - -class Artifact(proto.Message): - r"""Instance of a general artifact. - - Attributes: - name (str): - Output only. The resource name of the - Artifact. - display_name (str): - User provided display name of the Artifact. - May be up to 128 Unicode characters. - uri (str): - The uniform resource identifier of the - artifact file. May be empty if there is no - actual artifact file. - etag (str): - An eTag used to perform consistent read- - odify-write updates. If not set, a blind - "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1.types.Artifact.LabelsEntry]): - The labels with user-defined metadata to - organize your Artifacts. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. No more than 64 user labels can be - associated with one Artifact (System labels are - excluded). - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Artifact was - created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Artifact was - last updated. - state (google.cloud.aiplatform_v1.types.Artifact.State): - The state of this Artifact. 
This is a - property of the Artifact, and does not imply or - capture any ongoing process. This property is - managed by clients (such as Vertex AI - Pipelines), and the system does not prescribe or - check the validity of state transitions. - schema_title (str): - The title of the schema describing the - metadata. - Schema title and version is expected to be - registered in earlier Create Schema calls. And - both are used together as unique identifiers to - identify schemas within the local metadata - store. - schema_version (str): - The version of the schema in schema_name to use. - - Schema title and version is expected to be registered in - earlier Create Schema calls. And both are used together as - unique identifiers to identify schemas within the local - metadata store. - metadata (google.protobuf.struct_pb2.Struct): - Properties of the Artifact. - The size of this field should not exceed 200KB. - description (str): - Description of the Artifact - """ - class State(proto.Enum): - r"""Describes the state of the Artifact.""" - STATE_UNSPECIFIED = 0 - PENDING = 1 - LIVE = 2 - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - uri = proto.Field( - proto.STRING, - number=6, - ) - etag = proto.Field( - proto.STRING, - number=9, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=10, - ) - create_time = proto.Field( - proto.MESSAGE, - number=11, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=12, - message=timestamp_pb2.Timestamp, - ) - state = proto.Field( - proto.ENUM, - number=13, - enum=State, - ) - schema_title = proto.Field( - proto.STRING, - number=14, - ) - schema_version = proto.Field( - proto.STRING, - number=15, - ) - metadata = proto.Field( - proto.MESSAGE, - number=16, - message=struct_pb2.Struct, - ) - description = proto.Field( - proto.STRING, - number=17, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff 
--git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/batch_prediction_job.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/batch_prediction_job.py deleted file mode 100644 index 3ad80106a8..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/batch_prediction_job.py +++ /dev/null @@ -1,501 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import completion_stats as gca_completion_stats -from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1.types import explanation -from google.cloud.aiplatform_v1.types import io -from google.cloud.aiplatform_v1.types import job_state -from google.cloud.aiplatform_v1.types import machine_resources -from google.cloud.aiplatform_v1.types import manual_batch_tuning_parameters as gca_manual_batch_tuning_parameters -from google.cloud.aiplatform_v1.types import unmanaged_container_model as gca_unmanaged_container_model -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'BatchPredictionJob', - }, -) - - -class BatchPredictionJob(proto.Message): - r"""A job that uses a - 
[Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to - produce predictions on multiple [input - instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. - If predictions for significant portion of the instances fail, the - job may finish without attempting predictions for all remaining - instances. - - Attributes: - name (str): - Output only. Resource name of the - BatchPredictionJob. - display_name (str): - Required. The user-defined name of this - BatchPredictionJob. - model (str): - The name of the Model resoure that produces the predictions - via this job, must share the same ancestor Location. - Starting this job has no impact on any existing deployments - of the Model and their resources. Exactly one of model and - unmanaged_container_model must be set. - unmanaged_container_model (google.cloud.aiplatform_v1.types.UnmanagedContainerModel): - Contains model information necessary to perform batch - prediction without requiring uploading to model registry. - Exactly one of model and unmanaged_container_model must be - set. - input_config (google.cloud.aiplatform_v1.types.BatchPredictionJob.InputConfig): - Required. Input configuration of the instances on which - predictions are performed. The schema of any single instance - may be specified via the - [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri]. - model_parameters (google.protobuf.struct_pb2.Value): - The parameters that govern the predictions. The schema of - the parameters may be specified via the - [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. 
- output_config (google.cloud.aiplatform_v1.types.BatchPredictionJob.OutputConfig): - Required. The Configuration specifying where output - predictions should be written. The schema of any single - prediction may be specified as a concatenation of - [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] - and - [prediction_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.prediction_schema_uri]. - dedicated_resources (google.cloud.aiplatform_v1.types.BatchDedicatedResources): - The config of resources used by the Model during the batch - prediction. If the Model - [supports][google.cloud.aiplatform.v1.Model.supported_deployment_resources_types] - DEDICATED_RESOURCES this config may be provided (and the job - will use these resources), if the Model doesn't support - AUTOMATIC_RESOURCES, this config must be provided. - manual_batch_tuning_parameters (google.cloud.aiplatform_v1.types.ManualBatchTuningParameters): - Immutable. Parameters configuring the batch behavior. - Currently only applicable when - [dedicated_resources][google.cloud.aiplatform.v1.BatchPredictionJob.dedicated_resources] - are used (in other cases Vertex AI does the tuning itself). - generate_explanation (bool): - Generate explanation with the batch prediction results. - - When set to ``true``, the batch prediction output changes - based on the ``predictions_format`` field of the - [BatchPredictionJob.output_config][google.cloud.aiplatform.v1.BatchPredictionJob.output_config] - object: - - - ``bigquery``: output includes a column named - ``explanation``. The value is a struct that conforms to - the [Explanation][google.cloud.aiplatform.v1.Explanation] - object. - - ``jsonl``: The JSON objects on each line include an - additional entry keyed ``explanation``. 
The value of the - entry is a JSON object that conforms to the - [Explanation][google.cloud.aiplatform.v1.Explanation] - object. - - ``csv``: Generating explanations for CSV format is not - supported. - - If this field is set to true, either the - [Model.explanation_spec][google.cloud.aiplatform.v1.Model.explanation_spec] - or - [explanation_spec][google.cloud.aiplatform.v1.BatchPredictionJob.explanation_spec] - must be populated. - explanation_spec (google.cloud.aiplatform_v1.types.ExplanationSpec): - Explanation configuration for this BatchPredictionJob. Can - be specified only if - [generate_explanation][google.cloud.aiplatform.v1.BatchPredictionJob.generate_explanation] - is set to ``true``. - - This value overrides the value of - [Model.explanation_spec][google.cloud.aiplatform.v1.Model.explanation_spec]. - All fields of - [explanation_spec][google.cloud.aiplatform.v1.BatchPredictionJob.explanation_spec] - are optional in the request. If a field of the - [explanation_spec][google.cloud.aiplatform.v1.BatchPredictionJob.explanation_spec] - object is not populated, the corresponding field of the - [Model.explanation_spec][google.cloud.aiplatform.v1.Model.explanation_spec] - object is inherited. - output_info (google.cloud.aiplatform_v1.types.BatchPredictionJob.OutputInfo): - Output only. Information further describing - the output of this job. - state (google.cloud.aiplatform_v1.types.JobState): - Output only. The detailed state of the job. - error (google.rpc.status_pb2.Status): - Output only. Only populated when the job's state is - JOB_STATE_FAILED or JOB_STATE_CANCELLED. - partial_failures (Sequence[google.rpc.status_pb2.Status]): - Output only. Partial failures encountered. - For example, single files that can't be read. - This field never exceeds 20 entries. - Status details fields contain standard GCP error - details. - resources_consumed (google.cloud.aiplatform_v1.types.ResourcesConsumed): - Output only. 
Information about resources that - had been consumed by this job. Provided in real - time at best effort basis, as well as a final - value once the job completes. - - Note: This field currently may be not populated - for batch predictions that use AutoML Models. - completion_stats (google.cloud.aiplatform_v1.types.CompletionStats): - Output only. Statistics on completed and - failed prediction instances. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the BatchPredictionJob - was created. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the BatchPredictionJob for the first - time entered the ``JOB_STATE_RUNNING`` state. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the BatchPredictionJob entered any of - the following states: ``JOB_STATE_SUCCEEDED``, - ``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the BatchPredictionJob - was most recently updated. - labels (Sequence[google.cloud.aiplatform_v1.types.BatchPredictionJob.LabelsEntry]): - The labels with user-defined metadata to - organize BatchPredictionJobs. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): - Customer-managed encryption key options for a - BatchPredictionJob. If this is set, then all - resources created by the BatchPredictionJob will - be encrypted with the provided encryption key. - """ - - class InputConfig(proto.Message): - r"""Configures the input to - [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. 
- See - [Model.supported_input_storage_formats][google.cloud.aiplatform.v1.Model.supported_input_storage_formats] - for Model's supported input formats, and how instances should be - expressed via any of them. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - gcs_source (google.cloud.aiplatform_v1.types.GcsSource): - The Cloud Storage location for the input - instances. - - This field is a member of `oneof`_ ``source``. - bigquery_source (google.cloud.aiplatform_v1.types.BigQuerySource): - The BigQuery location of the input table. - The schema of the table should be in the format - described by the given context OpenAPI Schema, - if one is provided. The table may contain - additional columns that are not described by the - schema, and they will be ignored. - - This field is a member of `oneof`_ ``source``. - instances_format (str): - Required. The format in which instances are given, must be - one of the - [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model] - [supported_input_storage_formats][google.cloud.aiplatform.v1.Model.supported_input_storage_formats]. - """ - - gcs_source = proto.Field( - proto.MESSAGE, - number=2, - oneof='source', - message=io.GcsSource, - ) - bigquery_source = proto.Field( - proto.MESSAGE, - number=3, - oneof='source', - message=io.BigQuerySource, - ) - instances_format = proto.Field( - proto.STRING, - number=1, - ) - - class OutputConfig(proto.Message): - r"""Configures the output of - [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. 
- See - [Model.supported_output_storage_formats][google.cloud.aiplatform.v1.Model.supported_output_storage_formats] - for supported output formats, and how predictions are expressed via - any of them. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - gcs_destination (google.cloud.aiplatform_v1.types.GcsDestination): - The Cloud Storage location of the directory where the output - is to be written to. In the given directory a new directory - is created. Its name is - ``prediction--``, where - timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. - Inside of it files ``predictions_0001.``, - ``predictions_0002.``, ..., - ``predictions_N.`` are created where - ```` depends on chosen - [predictions_format][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.predictions_format], - and N may equal 0001 and depends on the total number of - successfully predicted instances. If the Model has both - [instance][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] - and - [prediction][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri] - schemata defined then each such file contains predictions as - per the - [predictions_format][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.predictions_format]. - If prediction for any instance failed (partially or - completely), then an additional ``errors_0001.``, - ``errors_0002.``,..., ``errors_N.`` - files are created (N depends on total number of failed - predictions). These files contain the failed instances, as - per their schema, followed by an additional ``error`` field - which as value has [google.rpc.Status][google.rpc.Status] - containing only ``code`` and ``message`` fields. 
- - This field is a member of `oneof`_ ``destination``. - bigquery_destination (google.cloud.aiplatform_v1.types.BigQueryDestination): - The BigQuery project or dataset location where the output is - to be written to. If project is provided, a new dataset is - created with name - ``prediction__`` where - is made BigQuery-dataset-name compatible (for example, most - special characters become underscores), and timestamp is in - YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the - dataset two tables will be created, ``predictions``, and - ``errors``. If the Model has both - [instance][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] - and - [prediction][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri] - schemata defined then the tables have columns as follows: - The ``predictions`` table contains instances for which the - prediction succeeded, it has columns as per a concatenation - of the Model's instance and prediction schemata. The - ``errors`` table contains rows for which the prediction has - failed, it has instance columns, as per the instance schema, - followed by a single "errors" column, which as values has - [google.rpc.Status][google.rpc.Status] represented as a - STRUCT, and containing only ``code`` and ``message``. - - This field is a member of `oneof`_ ``destination``. - predictions_format (str): - Required. The format in which Vertex AI gives the - predictions, must be one of the - [Model's][google.cloud.aiplatform.v1.BatchPredictionJob.model] - [supported_output_storage_formats][google.cloud.aiplatform.v1.Model.supported_output_storage_formats]. 
- """ - - gcs_destination = proto.Field( - proto.MESSAGE, - number=2, - oneof='destination', - message=io.GcsDestination, - ) - bigquery_destination = proto.Field( - proto.MESSAGE, - number=3, - oneof='destination', - message=io.BigQueryDestination, - ) - predictions_format = proto.Field( - proto.STRING, - number=1, - ) - - class OutputInfo(proto.Message): - r"""Further describes this job's output. Supplements - [output_config][google.cloud.aiplatform.v1.BatchPredictionJob.output_config]. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - gcs_output_directory (str): - Output only. The full path of the Cloud - Storage directory created, into which the - prediction output is written. - - This field is a member of `oneof`_ ``output_location``. - bigquery_output_dataset (str): - Output only. The path of the BigQuery dataset created, in - ``bq://projectId.bqDatasetId`` format, into which the - prediction output is written. - - This field is a member of `oneof`_ ``output_location``. - bigquery_output_table (str): - Output only. The name of the BigQuery table created, in - ``predictions_`` format, into which the - prediction output is written. Can be used by UI to generate - the BigQuery output path, for example. 
- """ - - gcs_output_directory = proto.Field( - proto.STRING, - number=1, - oneof='output_location', - ) - bigquery_output_dataset = proto.Field( - proto.STRING, - number=2, - oneof='output_location', - ) - bigquery_output_table = proto.Field( - proto.STRING, - number=4, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - model = proto.Field( - proto.STRING, - number=3, - ) - unmanaged_container_model = proto.Field( - proto.MESSAGE, - number=28, - message=gca_unmanaged_container_model.UnmanagedContainerModel, - ) - input_config = proto.Field( - proto.MESSAGE, - number=4, - message=InputConfig, - ) - model_parameters = proto.Field( - proto.MESSAGE, - number=5, - message=struct_pb2.Value, - ) - output_config = proto.Field( - proto.MESSAGE, - number=6, - message=OutputConfig, - ) - dedicated_resources = proto.Field( - proto.MESSAGE, - number=7, - message=machine_resources.BatchDedicatedResources, - ) - manual_batch_tuning_parameters = proto.Field( - proto.MESSAGE, - number=8, - message=gca_manual_batch_tuning_parameters.ManualBatchTuningParameters, - ) - generate_explanation = proto.Field( - proto.BOOL, - number=23, - ) - explanation_spec = proto.Field( - proto.MESSAGE, - number=25, - message=explanation.ExplanationSpec, - ) - output_info = proto.Field( - proto.MESSAGE, - number=9, - message=OutputInfo, - ) - state = proto.Field( - proto.ENUM, - number=10, - enum=job_state.JobState, - ) - error = proto.Field( - proto.MESSAGE, - number=11, - message=status_pb2.Status, - ) - partial_failures = proto.RepeatedField( - proto.MESSAGE, - number=12, - message=status_pb2.Status, - ) - resources_consumed = proto.Field( - proto.MESSAGE, - number=13, - message=machine_resources.ResourcesConsumed, - ) - completion_stats = proto.Field( - proto.MESSAGE, - number=14, - message=gca_completion_stats.CompletionStats, - ) - create_time = proto.Field( - proto.MESSAGE, - number=15, - message=timestamp_pb2.Timestamp, - 
) - start_time = proto.Field( - proto.MESSAGE, - number=16, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=17, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=18, - message=timestamp_pb2.Timestamp, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=19, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=24, - message=gca_encryption_spec.EncryptionSpec, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/completion_stats.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/completion_stats.py deleted file mode 100644 index 289efbc59b..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/completion_stats.py +++ /dev/null @@ -1,63 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'CompletionStats', - }, -) - - -class CompletionStats(proto.Message): - r"""Success and error statistics of processing multiple entities - (for example, DataItems or structured data rows) in batch. - - Attributes: - successful_count (int): - Output only. The number of entities that had - been processed successfully. - failed_count (int): - Output only. 
The number of entities for which - any error was encountered. - incomplete_count (int): - Output only. In cases when enough errors are - encountered a job, pipeline, or operation may be - failed as a whole. Below is the number of - entities for which the processing had not been - finished (either in successful or failed state). - Set to -1 if the number is unknown (for example, - the operation failed before the total entity - number could be collected). - """ - - successful_count = proto.Field( - proto.INT64, - number=1, - ) - failed_count = proto.Field( - proto.INT64, - number=2, - ) - incomplete_count = proto.Field( - proto.INT64, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/context.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/context.py deleted file mode 100644 index 91bc7843be..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/context.py +++ /dev/null @@ -1,136 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'Context', - }, -) - - -class Context(proto.Message): - r"""Instance of a general context. - - Attributes: - name (str): - Output only. 
The resource name of the - Context. - display_name (str): - User provided display name of the Context. - May be up to 128 Unicode characters. - etag (str): - An eTag used to perform consistent read- - odify-write updates. If not set, a blind - "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1.types.Context.LabelsEntry]): - The labels with user-defined metadata to - organize your Contexts. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. No more than 64 user labels can be - associated with one Context (System labels are - excluded). - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Context was - created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Context was - last updated. - parent_contexts (Sequence[str]): - Output only. A list of resource names of Contexts that are - parents of this Context. A Context may have at most 10 - parent_contexts. - schema_title (str): - The title of the schema describing the - metadata. - Schema title and version is expected to be - registered in earlier Create Schema calls. And - both are used together as unique identifiers to - identify schemas within the local metadata - store. - schema_version (str): - The version of the schema in schema_name to use. - - Schema title and version is expected to be registered in - earlier Create Schema calls. And both are used together as - unique identifiers to identify schemas within the local - metadata store. - metadata (google.protobuf.struct_pb2.Struct): - Properties of the Context. - The size of this field should not exceed 200KB. 
- description (str): - Description of the Context - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - etag = proto.Field( - proto.STRING, - number=8, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=9, - ) - create_time = proto.Field( - proto.MESSAGE, - number=10, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=11, - message=timestamp_pb2.Timestamp, - ) - parent_contexts = proto.RepeatedField( - proto.STRING, - number=12, - ) - schema_title = proto.Field( - proto.STRING, - number=13, - ) - schema_version = proto.Field( - proto.STRING, - number=14, - ) - metadata = proto.Field( - proto.MESSAGE, - number=15, - message=struct_pb2.Struct, - ) - description = proto.Field( - proto.STRING, - number=16, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/custom_job.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/custom_job.py deleted file mode 100644 index d2b6223a7e..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/custom_job.py +++ /dev/null @@ -1,455 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1.types import env_var -from google.cloud.aiplatform_v1.types import io -from google.cloud.aiplatform_v1.types import job_state -from google.cloud.aiplatform_v1.types import machine_resources -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'CustomJob', - 'CustomJobSpec', - 'WorkerPoolSpec', - 'ContainerSpec', - 'PythonPackageSpec', - 'Scheduling', - }, -) - - -class CustomJob(proto.Message): - r"""Represents a job that runs custom workloads such as a Docker - container or a Python package. A CustomJob can have multiple - worker pools and each worker pool can have its own machine and - input spec. A CustomJob will be cleaned up once the job enters - terminal state (failed or succeeded). - - Attributes: - name (str): - Output only. Resource name of a CustomJob. - display_name (str): - Required. The display name of the CustomJob. - The name can be up to 128 characters long and - can be consist of any UTF-8 characters. - job_spec (google.cloud.aiplatform_v1.types.CustomJobSpec): - Required. Job spec. - state (google.cloud.aiplatform_v1.types.JobState): - Output only. The detailed state of the job. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the CustomJob was - created. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the CustomJob for the first time - entered the ``JOB_STATE_RUNNING`` state. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the CustomJob entered any of the - following states: ``JOB_STATE_SUCCEEDED``, - ``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``. 
- update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the CustomJob was most - recently updated. - error (google.rpc.status_pb2.Status): - Output only. Only populated when job's state is - ``JOB_STATE_FAILED`` or ``JOB_STATE_CANCELLED``. - labels (Sequence[google.cloud.aiplatform_v1.types.CustomJob.LabelsEntry]): - The labels with user-defined metadata to - organize CustomJobs. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): - Customer-managed encryption key options for a - CustomJob. If this is set, then all resources - created by the CustomJob will be encrypted with - the provided encryption key. - web_access_uris (Sequence[google.cloud.aiplatform_v1.types.CustomJob.WebAccessUrisEntry]): - Output only. URIs for accessing `interactive - shells `__ - (one URI for each training node). Only available if - [job_spec.enable_web_access][google.cloud.aiplatform.v1.CustomJobSpec.enable_web_access] - is ``true``. - - The keys are names of each node in the training job; for - example, ``workerpool0-0`` for the primary node, - ``workerpool1-0`` for the first node in the second worker - pool, and ``workerpool1-1`` for the second node in the - second worker pool. - - The values are the URIs for each node's interactive shell. 
- """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - job_spec = proto.Field( - proto.MESSAGE, - number=4, - message='CustomJobSpec', - ) - state = proto.Field( - proto.ENUM, - number=5, - enum=job_state.JobState, - ) - create_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - start_time = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=8, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=9, - message=timestamp_pb2.Timestamp, - ) - error = proto.Field( - proto.MESSAGE, - number=10, - message=status_pb2.Status, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=11, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=12, - message=gca_encryption_spec.EncryptionSpec, - ) - web_access_uris = proto.MapField( - proto.STRING, - proto.STRING, - number=16, - ) - - -class CustomJobSpec(proto.Message): - r"""Represents the spec of a CustomJob. - - Attributes: - worker_pool_specs (Sequence[google.cloud.aiplatform_v1.types.WorkerPoolSpec]): - Required. The spec of the worker pools - including machine type and Docker image. All - worker pools except the first one are optional - and can be skipped by providing an empty value. - scheduling (google.cloud.aiplatform_v1.types.Scheduling): - Scheduling options for a CustomJob. - service_account (str): - Specifies the service account for workload run-as account. - Users submitting jobs must have act-as permission on this - run-as account. If unspecified, the `Vertex AI Custom Code - Service - Agent `__ - for the CustomJob's project is used. - network (str): - The full name of the Compute Engine - `network `__ - to which the Job should be peered. For example, - ``projects/12345/global/networks/myVPC``. 
- `Format `__ - is of the form - ``projects/{project}/global/networks/{network}``. Where - {project} is a project number, as in ``12345``, and - {network} is a network name. - - To specify this field, you must have already `configured VPC - Network Peering for Vertex - AI `__. - - If this field is left unspecified, the job is not peered - with any network. - base_output_directory (google.cloud.aiplatform_v1.types.GcsDestination): - The Cloud Storage location to store the output of this - CustomJob or HyperparameterTuningJob. For - HyperparameterTuningJob, the baseOutputDirectory of each - child CustomJob backing a Trial is set to a subdirectory of - name [id][google.cloud.aiplatform.v1.Trial.id] under its - parent HyperparameterTuningJob's baseOutputDirectory. - - The following Vertex AI environment variables will be passed - to containers or python modules when this field is set: - - For CustomJob: - - - AIP_MODEL_DIR = ``/model/`` - - AIP_CHECKPOINT_DIR = - ``/checkpoints/`` - - AIP_TENSORBOARD_LOG_DIR = - ``/logs/`` - - For CustomJob backing a Trial of HyperparameterTuningJob: - - - AIP_MODEL_DIR = - ``//model/`` - - AIP_CHECKPOINT_DIR = - ``//checkpoints/`` - - AIP_TENSORBOARD_LOG_DIR = - ``//logs/`` - tensorboard (str): - Optional. The name of a Vertex AI - [Tensorboard][google.cloud.aiplatform.v1.Tensorboard] - resource to which this CustomJob will upload Tensorboard - logs. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - enable_web_access (bool): - Optional. Whether you want Vertex AI to enable `interactive - shell - access `__ - to training containers. - - If set to ``true``, you can access interactive shells at the - URIs given by - [CustomJob.web_access_uris][google.cloud.aiplatform.v1.CustomJob.web_access_uris] - or - [Trial.web_access_uris][google.cloud.aiplatform.v1.Trial.web_access_uris] - (within - [HyperparameterTuningJob.trials][google.cloud.aiplatform.v1.HyperparameterTuningJob.trials]). 
- """ - - worker_pool_specs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='WorkerPoolSpec', - ) - scheduling = proto.Field( - proto.MESSAGE, - number=3, - message='Scheduling', - ) - service_account = proto.Field( - proto.STRING, - number=4, - ) - network = proto.Field( - proto.STRING, - number=5, - ) - base_output_directory = proto.Field( - proto.MESSAGE, - number=6, - message=io.GcsDestination, - ) - tensorboard = proto.Field( - proto.STRING, - number=7, - ) - enable_web_access = proto.Field( - proto.BOOL, - number=10, - ) - - -class WorkerPoolSpec(proto.Message): - r"""Represents the spec of a worker pool in a job. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - container_spec (google.cloud.aiplatform_v1.types.ContainerSpec): - The custom container task. - - This field is a member of `oneof`_ ``task``. - python_package_spec (google.cloud.aiplatform_v1.types.PythonPackageSpec): - The Python packaged task. - - This field is a member of `oneof`_ ``task``. - machine_spec (google.cloud.aiplatform_v1.types.MachineSpec): - Optional. Immutable. The specification of a - single machine. - replica_count (int): - Optional. The number of worker replicas to - use for this worker pool. - disk_spec (google.cloud.aiplatform_v1.types.DiskSpec): - Disk spec. 
- """ - - container_spec = proto.Field( - proto.MESSAGE, - number=6, - oneof='task', - message='ContainerSpec', - ) - python_package_spec = proto.Field( - proto.MESSAGE, - number=7, - oneof='task', - message='PythonPackageSpec', - ) - machine_spec = proto.Field( - proto.MESSAGE, - number=1, - message=machine_resources.MachineSpec, - ) - replica_count = proto.Field( - proto.INT64, - number=2, - ) - disk_spec = proto.Field( - proto.MESSAGE, - number=5, - message=machine_resources.DiskSpec, - ) - - -class ContainerSpec(proto.Message): - r"""The spec of a Container. - - Attributes: - image_uri (str): - Required. The URI of a container image in the - Container Registry that is to be run on each - worker replica. - command (Sequence[str]): - The command to be invoked when the container - is started. It overrides the entrypoint - instruction in Dockerfile when provided. - args (Sequence[str]): - The arguments to be passed when starting the - container. - env (Sequence[google.cloud.aiplatform_v1.types.EnvVar]): - Environment variables to be passed to the - container. Maximum limit is 100. - """ - - image_uri = proto.Field( - proto.STRING, - number=1, - ) - command = proto.RepeatedField( - proto.STRING, - number=2, - ) - args = proto.RepeatedField( - proto.STRING, - number=3, - ) - env = proto.RepeatedField( - proto.MESSAGE, - number=4, - message=env_var.EnvVar, - ) - - -class PythonPackageSpec(proto.Message): - r"""The spec of a Python packaged code. - - Attributes: - executor_image_uri (str): - Required. The URI of a container image in Artifact Registry - that will run the provided Python package. Vertex AI - provides a wide range of executor images with pre-installed - packages to meet users' various use cases. See the list of - `pre-built containers for - training `__. - You must use an image from this list. - package_uris (Sequence[str]): - Required. 
The Google Cloud Storage location - of the Python package files which are the - training program and its dependent packages. The - maximum number of package URIs is 100. - python_module (str): - Required. The Python module name to run after - installing the packages. - args (Sequence[str]): - Command line arguments to be passed to the - Python task. - env (Sequence[google.cloud.aiplatform_v1.types.EnvVar]): - Environment variables to be passed to the - python module. Maximum limit is 100. - """ - - executor_image_uri = proto.Field( - proto.STRING, - number=1, - ) - package_uris = proto.RepeatedField( - proto.STRING, - number=2, - ) - python_module = proto.Field( - proto.STRING, - number=3, - ) - args = proto.RepeatedField( - proto.STRING, - number=4, - ) - env = proto.RepeatedField( - proto.MESSAGE, - number=5, - message=env_var.EnvVar, - ) - - -class Scheduling(proto.Message): - r"""All parameters related to queuing and scheduling of custom - jobs. - - Attributes: - timeout (google.protobuf.duration_pb2.Duration): - The maximum job running time. The default is - 7 days. - restart_job_on_worker_restart (bool): - Restarts the entire CustomJob if a worker - gets restarted. This feature can be used by - distributed training jobs that are not resilient - to workers leaving and joining a job. 
- """ - - timeout = proto.Field( - proto.MESSAGE, - number=1, - message=duration_pb2.Duration, - ) - restart_job_on_worker_restart = proto.Field( - proto.BOOL, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_item.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_item.py deleted file mode 100644 index 0ec4a5901e..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_item.py +++ /dev/null @@ -1,101 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'DataItem', - }, -) - - -class DataItem(proto.Message): - r"""A piece of data in a Dataset. Could be an image, a video, a - document or plain text. - - Attributes: - name (str): - Output only. The resource name of the - DataItem. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this DataItem was - created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this DataItem was - last updated. - labels (Sequence[google.cloud.aiplatform_v1.types.DataItem.LabelsEntry]): - Optional. The labels with user-defined - metadata to organize your DataItems. 
- Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. No more than 64 user labels can be - associated with one DataItem(System labels are - excluded). - - See https://goo.gl/xmQnxf for more information - and examples of labels. System reserved label - keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. - payload (google.protobuf.struct_pb2.Value): - Required. The data that the DataItem represents (for - example, an image or a text snippet). The schema of the - payload is stored in the parent Dataset's [metadata - schema's][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] - dataItemSchemaUri field. - etag (str): - Optional. Used to perform consistent read- - odify-write updates. If not set, a blind - "overwrite" update happens. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - create_time = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=3, - ) - payload = proto.Field( - proto.MESSAGE, - number=4, - message=struct_pb2.Value, - ) - etag = proto.Field( - proto.STRING, - number=7, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_labeling_job.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_labeling_job.py deleted file mode 100644 index a3a61df14f..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/data_labeling_job.py +++ /dev/null @@ -1,350 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1.types import job_state -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from google.type import money_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'DataLabelingJob', - 'ActiveLearningConfig', - 'SampleConfig', - 'TrainingConfig', - }, -) - - -class DataLabelingJob(proto.Message): - r"""DataLabelingJob is used to trigger a human labeling job on - unlabeled data from the following Dataset: - - Attributes: - name (str): - Output only. Resource name of the - DataLabelingJob. - display_name (str): - Required. The user-defined name of the - DataLabelingJob. The name can be up to 128 - characters long and can be consist of any UTF-8 - characters. - Display name of a DataLabelingJob. - datasets (Sequence[str]): - Required. Dataset resource names. Right now we only support - labeling from a single Dataset. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - annotation_labels (Sequence[google.cloud.aiplatform_v1.types.DataLabelingJob.AnnotationLabelsEntry]): - Labels to assign to annotations generated by - this DataLabelingJob. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. 
International characters - are allowed. See https://goo.gl/xmQnxf for more - information and examples of labels. System - reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. - labeler_count (int): - Required. Number of labelers to work on each - DataItem. - instruction_uri (str): - Required. The Google Cloud Storage location - of the instruction pdf. This pdf is shared with - labelers, and provides detailed description on - how to label DataItems in Datasets. - inputs_schema_uri (str): - Required. Points to a YAML file stored on - Google Cloud Storage describing the config for a - specific type of DataLabelingJob. The schema - files that can be used here are found in the - https://storage.googleapis.com/google-cloud- - aiplatform bucket in the - /schema/datalabelingjob/inputs/ folder. - inputs (google.protobuf.struct_pb2.Value): - Required. Input config parameters for the - DataLabelingJob. - state (google.cloud.aiplatform_v1.types.JobState): - Output only. The detailed state of the job. - labeling_progress (int): - Output only. Current labeling job progress percentage scaled - in interval [0, 100], indicating the percentage of DataItems - that has been finished. - current_spend (google.type.money_pb2.Money): - Output only. Estimated cost(in US dollars) - that the DataLabelingJob has incurred to date. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - DataLabelingJob was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - DataLabelingJob was updated most recently. - error (google.rpc.status_pb2.Status): - Output only. DataLabelingJob errors. It is only populated - when job's state is ``JOB_STATE_FAILED`` or - ``JOB_STATE_CANCELLED``. - labels (Sequence[google.cloud.aiplatform_v1.types.DataLabelingJob.LabelsEntry]): - The labels with user-defined metadata to organize your - DataLabelingJobs. 
- - Label keys and values can be no longer than 64 characters - (Unicode codepoints), can only contain lowercase letters, - numeric characters, underscores and dashes. International - characters are allowed. - - See https://goo.gl/xmQnxf for more information and examples - of labels. System reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. Following - system labels exist for each DataLabelingJob: - - - "aiplatform.googleapis.com/schema": output only, its - value is the - [inputs_schema][google.cloud.aiplatform.v1.DataLabelingJob.inputs_schema_uri]'s - title. - specialist_pools (Sequence[str]): - The SpecialistPools' resource names - associated with this job. - encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): - Customer-managed encryption key spec for a - DataLabelingJob. If set, this DataLabelingJob - will be secured by this key. - Note: Annotations created in the DataLabelingJob - are associated with the EncryptionSpec of the - Dataset they are exported to. - active_learning_config (google.cloud.aiplatform_v1.types.ActiveLearningConfig): - Parameters that configure the active learning - pipeline. Active learning will label the data - incrementally via several iterations. For every - iteration, it will select a batch of data based - on the sampling strategy. 
- """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - datasets = proto.RepeatedField( - proto.STRING, - number=3, - ) - annotation_labels = proto.MapField( - proto.STRING, - proto.STRING, - number=12, - ) - labeler_count = proto.Field( - proto.INT32, - number=4, - ) - instruction_uri = proto.Field( - proto.STRING, - number=5, - ) - inputs_schema_uri = proto.Field( - proto.STRING, - number=6, - ) - inputs = proto.Field( - proto.MESSAGE, - number=7, - message=struct_pb2.Value, - ) - state = proto.Field( - proto.ENUM, - number=8, - enum=job_state.JobState, - ) - labeling_progress = proto.Field( - proto.INT32, - number=13, - ) - current_spend = proto.Field( - proto.MESSAGE, - number=14, - message=money_pb2.Money, - ) - create_time = proto.Field( - proto.MESSAGE, - number=9, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=10, - message=timestamp_pb2.Timestamp, - ) - error = proto.Field( - proto.MESSAGE, - number=22, - message=status_pb2.Status, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=11, - ) - specialist_pools = proto.RepeatedField( - proto.STRING, - number=16, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=20, - message=gca_encryption_spec.EncryptionSpec, - ) - active_learning_config = proto.Field( - proto.MESSAGE, - number=21, - message='ActiveLearningConfig', - ) - - -class ActiveLearningConfig(proto.Message): - r"""Parameters that configure the active learning pipeline. - Active learning will label the data incrementally by several - iterations. For every iteration, it will select a batch of data - based on the sampling strategy. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - max_data_item_count (int): - Max number of human labeled DataItems. - - This field is a member of `oneof`_ ``human_labeling_budget``. - max_data_item_percentage (int): - Max percent of total DataItems for human - labeling. - - This field is a member of `oneof`_ ``human_labeling_budget``. - sample_config (google.cloud.aiplatform_v1.types.SampleConfig): - Active learning data sampling config. For - every active learning labeling iteration, it - will select a batch of data based on the - sampling strategy. - training_config (google.cloud.aiplatform_v1.types.TrainingConfig): - CMLE training config. For every active - learning labeling iteration, system will train a - machine learning model on CMLE. The trained - model will be used by data sampling algorithm to - select DataItems. - """ - - max_data_item_count = proto.Field( - proto.INT64, - number=1, - oneof='human_labeling_budget', - ) - max_data_item_percentage = proto.Field( - proto.INT32, - number=2, - oneof='human_labeling_budget', - ) - sample_config = proto.Field( - proto.MESSAGE, - number=3, - message='SampleConfig', - ) - training_config = proto.Field( - proto.MESSAGE, - number=4, - message='TrainingConfig', - ) - - -class SampleConfig(proto.Message): - r"""Active learning data sampling config. For every active - learning labeling iteration, it will select a batch of data - based on the sampling strategy. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - initial_batch_sample_percentage (int): - The percentage of data needed to be labeled - in the first batch. - - This field is a member of `oneof`_ ``initial_batch_sample_size``. - following_batch_sample_percentage (int): - The percentage of data needed to be labeled - in each following batch (except the first - batch). 
- - This field is a member of `oneof`_ ``following_batch_sample_size``. - sample_strategy (google.cloud.aiplatform_v1.types.SampleConfig.SampleStrategy): - Field to choose sampling strategy. Sampling - strategy will decide which data should be - selected for human labeling in every batch. - """ - class SampleStrategy(proto.Enum): - r"""Sample strategy decides which subset of DataItems should be - selected for human labeling in every batch. - """ - SAMPLE_STRATEGY_UNSPECIFIED = 0 - UNCERTAINTY = 1 - - initial_batch_sample_percentage = proto.Field( - proto.INT32, - number=1, - oneof='initial_batch_sample_size', - ) - following_batch_sample_percentage = proto.Field( - proto.INT32, - number=3, - oneof='following_batch_sample_size', - ) - sample_strategy = proto.Field( - proto.ENUM, - number=5, - enum=SampleStrategy, - ) - - -class TrainingConfig(proto.Message): - r"""CMLE training config. For every active learning labeling - iteration, system will train a machine learning model on CMLE. - The trained model will be used by data sampling algorithm to - select DataItems. - - Attributes: - timeout_training_milli_hours (int): - The timeout hours for the CMLE training job, - expressed in milli hours i.e. 1,000 value in - this field means 1 hour. - """ - - timeout_training_milli_hours = proto.Field( - proto.INT64, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset.py deleted file mode 100644 index 2292983a13..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset.py +++ /dev/null @@ -1,237 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1.types import io -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'Dataset', - 'ImportDataConfig', - 'ExportDataConfig', - }, -) - - -class Dataset(proto.Message): - r"""A collection of DataItems and Annotations on them. - - Attributes: - name (str): - Output only. The resource name of the - Dataset. - display_name (str): - Required. The user-defined name of the - Dataset. The name can be up to 128 characters - long and can be consist of any UTF-8 characters. - description (str): - Optional. The description of the Dataset. - metadata_schema_uri (str): - Required. Points to a YAML file stored on - Google Cloud Storage describing additional - information about the Dataset. The schema is - defined as an OpenAPI 3.0.2 Schema Object. The - schema files that can be used here are found in - gs://google-cloud- - aiplatform/schema/dataset/metadata/. - metadata (google.protobuf.struct_pb2.Value): - Required. Additional information about the - Dataset. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Dataset was - created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Dataset was - last updated. - etag (str): - Used to perform consistent read-modify-write - updates. 
If not set, a blind "overwrite" update - happens. - labels (Sequence[google.cloud.aiplatform_v1.types.Dataset.LabelsEntry]): - The labels with user-defined metadata to organize your - Datasets. - - Label keys and values can be no longer than 64 characters - (Unicode codepoints), can only contain lowercase letters, - numeric characters, underscores and dashes. International - characters are allowed. No more than 64 user labels can be - associated with one Dataset (System labels are excluded). - - See https://goo.gl/xmQnxf for more information and examples - of labels. System reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. Following - system labels exist for each Dataset: - - - "aiplatform.googleapis.com/dataset_metadata_schema": - output only, its value is the - [metadata_schema's][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] - title. - encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): - Customer-managed encryption key spec for a - Dataset. If set, this Dataset and all sub- - resources of this Dataset will be secured by - this key. 
- """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=16, - ) - metadata_schema_uri = proto.Field( - proto.STRING, - number=3, - ) - metadata = proto.Field( - proto.MESSAGE, - number=8, - message=struct_pb2.Value, - ) - create_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - etag = proto.Field( - proto.STRING, - number=6, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=7, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=11, - message=gca_encryption_spec.EncryptionSpec, - ) - - -class ImportDataConfig(proto.Message): - r"""Describes the location from where we import data into a - Dataset, together with the labels that will be applied to the - DataItems and the Annotations. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - gcs_source (google.cloud.aiplatform_v1.types.GcsSource): - The Google Cloud Storage location for the - input content. - - This field is a member of `oneof`_ ``source``. - data_item_labels (Sequence[google.cloud.aiplatform_v1.types.ImportDataConfig.DataItemLabelsEntry]): - Labels that will be applied to newly imported DataItems. If - an identical DataItem as one being imported already exists - in the Dataset, then these labels will be appended to these - of the already existing one, and if labels with identical - key is imported before, the old label value will be - overwritten. If two DataItems are identical in the same - import data operation, the labels will be combined and if - key collision happens in this case, one of the values will - be picked randomly. Two DataItems are considered identical - if their content bytes are identical (e.g. image bytes or - pdf bytes). 
These labels will be overridden by Annotation - labels specified inside index file referenced by - [import_schema_uri][google.cloud.aiplatform.v1.ImportDataConfig.import_schema_uri], - e.g. jsonl file. - import_schema_uri (str): - Required. Points to a YAML file stored on Google Cloud - Storage describing the import format. Validation will be - done against the schema. The schema is defined as an - `OpenAPI 3.0.2 Schema - Object `__. - """ - - gcs_source = proto.Field( - proto.MESSAGE, - number=1, - oneof='source', - message=io.GcsSource, - ) - data_item_labels = proto.MapField( - proto.STRING, - proto.STRING, - number=2, - ) - import_schema_uri = proto.Field( - proto.STRING, - number=4, - ) - - -class ExportDataConfig(proto.Message): - r"""Describes what part of the Dataset is to be exported, the - destination of the export and how to export. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - gcs_destination (google.cloud.aiplatform_v1.types.GcsDestination): - The Google Cloud Storage location where the output is to be - written to. In the given directory a new directory will be - created with name: - ``export-data--`` - where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 - format. All export output will be written into that - directory. Inside that directory, annotations with the same - schema will be grouped into sub directories which are named - with the corresponding annotations' schema title. Inside - these sub directories, a schema.yaml will be created to - describe the output format. - - This field is a member of `oneof`_ ``destination``. - annotations_filter (str): - A filter on Annotations of the Dataset. Only Annotations on - to-be-exported DataItems(specified by [data_items_filter][]) - that match this filter will be exported. The filter syntax - is the same as in - [ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. 
- """ - - gcs_destination = proto.Field( - proto.MESSAGE, - number=1, - oneof='destination', - message=io.GcsDestination, - ) - annotations_filter = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset_service.py deleted file mode 100644 index 2da0ec7c8d..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/dataset_service.py +++ /dev/null @@ -1,543 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import annotation -from google.cloud.aiplatform_v1.types import data_item -from google.cloud.aiplatform_v1.types import dataset as gca_dataset -from google.cloud.aiplatform_v1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'CreateDatasetRequest', - 'CreateDatasetOperationMetadata', - 'GetDatasetRequest', - 'UpdateDatasetRequest', - 'ListDatasetsRequest', - 'ListDatasetsResponse', - 'DeleteDatasetRequest', - 'ImportDataRequest', - 'ImportDataResponse', - 'ImportDataOperationMetadata', - 'ExportDataRequest', - 'ExportDataResponse', - 'ExportDataOperationMetadata', - 'ListDataItemsRequest', - 'ListDataItemsResponse', - 'GetAnnotationSpecRequest', - 'ListAnnotationsRequest', - 'ListAnnotationsResponse', - }, -) - - -class CreateDatasetRequest(proto.Message): - r"""Request message for - [DatasetService.CreateDataset][google.cloud.aiplatform.v1.DatasetService.CreateDataset]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - Dataset in. Format: - ``projects/{project}/locations/{location}`` - dataset (google.cloud.aiplatform_v1.types.Dataset): - Required. The Dataset to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - dataset = proto.Field( - proto.MESSAGE, - number=2, - message=gca_dataset.Dataset, - ) - - -class CreateDatasetOperationMetadata(proto.Message): - r"""Runtime operation information for - [DatasetService.CreateDataset][google.cloud.aiplatform.v1.DatasetService.CreateDataset]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The operation generic information. 
- """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class GetDatasetRequest(proto.Message): - r"""Request message for - [DatasetService.GetDataset][google.cloud.aiplatform.v1.DatasetService.GetDataset]. - - Attributes: - name (str): - Required. The name of the Dataset resource. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class UpdateDatasetRequest(proto.Message): - r"""Request message for - [DatasetService.UpdateDataset][google.cloud.aiplatform.v1.DatasetService.UpdateDataset]. - - Attributes: - dataset (google.cloud.aiplatform_v1.types.Dataset): - Required. The Dataset which replaces the - resource on the server. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. For the - ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - Updatable fields: - - - ``display_name`` - - ``description`` - - ``labels`` - """ - - dataset = proto.Field( - proto.MESSAGE, - number=1, - message=gca_dataset.Dataset, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class ListDatasetsRequest(proto.Message): - r"""Request message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. - - Attributes: - parent (str): - Required. The name of the Dataset's parent resource. Format: - ``projects/{project}/locations/{location}`` - filter (str): - An expression for filtering the results of the request. For - field names both snake_case and camelCase are supported. 
- - - ``display_name``: supports = and != - - ``metadata_schema_uri``: supports = and != - - ``labels`` supports general map functions that is: - - - ``labels.key=value`` - key:value equality - - \`labels.key:\* or labels:key - key existence - - A key including a space must be quoted. - ``labels."a key"``. - - Some examples: - - - ``displayName="myDisplayName"`` - - ``labels.myKey="myValue"`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - order_by (str): - A comma-separated list of fields to order by, sorted in - ascending order. Use "desc" after a field name for - descending. Supported fields: - - - ``display_name`` - - ``create_time`` - - ``update_time`` - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - order_by = proto.Field( - proto.STRING, - number=6, - ) - - -class ListDatasetsResponse(proto.Message): - r"""Response message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1.DatasetService.ListDatasets]. - - Attributes: - datasets (Sequence[google.cloud.aiplatform_v1.types.Dataset]): - A list of Datasets that matches the specified - filter in the request. - next_page_token (str): - The standard List next-page token. - """ - - @property - def raw_page(self): - return self - - datasets = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_dataset.Dataset, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteDatasetRequest(proto.Message): - r"""Request message for - [DatasetService.DeleteDataset][google.cloud.aiplatform.v1.DatasetService.DeleteDataset]. 
- - Attributes: - name (str): - Required. The resource name of the Dataset to delete. - Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ImportDataRequest(proto.Message): - r"""Request message for - [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. - - Attributes: - name (str): - Required. The name of the Dataset resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - import_configs (Sequence[google.cloud.aiplatform_v1.types.ImportDataConfig]): - Required. The desired input locations. The - contents of all input locations will be imported - in one batch. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - import_configs = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=gca_dataset.ImportDataConfig, - ) - - -class ImportDataResponse(proto.Message): - r"""Response message for - [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. - - """ - - -class ImportDataOperationMetadata(proto.Message): - r"""Runtime operation information for - [DatasetService.ImportData][google.cloud.aiplatform.v1.DatasetService.ImportData]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The common part of the operation metadata. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class ExportDataRequest(proto.Message): - r"""Request message for - [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. - - Attributes: - name (str): - Required. The name of the Dataset resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - export_config (google.cloud.aiplatform_v1.types.ExportDataConfig): - Required. The desired output location. 
- """ - - name = proto.Field( - proto.STRING, - number=1, - ) - export_config = proto.Field( - proto.MESSAGE, - number=2, - message=gca_dataset.ExportDataConfig, - ) - - -class ExportDataResponse(proto.Message): - r"""Response message for - [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. - - Attributes: - exported_files (Sequence[str]): - All of the files that are exported in this - export operation. - """ - - exported_files = proto.RepeatedField( - proto.STRING, - number=1, - ) - - -class ExportDataOperationMetadata(proto.Message): - r"""Runtime operation information for - [DatasetService.ExportData][google.cloud.aiplatform.v1.DatasetService.ExportData]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The common part of the operation metadata. - gcs_output_directory (str): - A Google Cloud Storage directory which path - ends with '/'. The exported data is stored in - the directory. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - gcs_output_directory = proto.Field( - proto.STRING, - number=2, - ) - - -class ListDataItemsRequest(proto.Message): - r"""Request message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. - - Attributes: - parent (str): - Required. The resource name of the Dataset to list DataItems - from. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - filter (str): - The standard list filter. - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - order_by (str): - A comma-separated list of fields to order by, - sorted in ascending order. Use "desc" after a - field name for descending. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - order_by = proto.Field( - proto.STRING, - number=6, - ) - - -class ListDataItemsResponse(proto.Message): - r"""Response message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems]. - - Attributes: - data_items (Sequence[google.cloud.aiplatform_v1.types.DataItem]): - A list of DataItems that matches the - specified filter in the request. - next_page_token (str): - The standard List next-page token. - """ - - @property - def raw_page(self): - return self - - data_items = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=data_item.DataItem, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class GetAnnotationSpecRequest(proto.Message): - r"""Request message for - [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1.DatasetService.GetAnnotationSpec]. - - Attributes: - name (str): - Required. The name of the AnnotationSpec resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}`` - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class ListAnnotationsRequest(proto.Message): - r"""Request message for - [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. - - Attributes: - parent (str): - Required. The resource name of the DataItem to list - Annotations from. 
Format: - ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}`` - filter (str): - The standard list filter. - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - order_by (str): - A comma-separated list of fields to order by, - sorted in ascending order. Use "desc" after a - field name for descending. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - order_by = proto.Field( - proto.STRING, - number=6, - ) - - -class ListAnnotationsResponse(proto.Message): - r"""Response message for - [DatasetService.ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations]. - - Attributes: - annotations (Sequence[google.cloud.aiplatform_v1.types.Annotation]): - A list of Annotations that matches the - specified filter in the request. - next_page_token (str): - The standard List next-page token. 
- """ - - @property - def raw_page(self): - return self - - annotations = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=annotation.Annotation, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/deployed_index_ref.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/deployed_index_ref.py deleted file mode 100644 index a5c5649b20..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/deployed_index_ref.py +++ /dev/null @@ -1,49 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'DeployedIndexRef', - }, -) - - -class DeployedIndexRef(proto.Message): - r"""Points to a DeployedIndex. - - Attributes: - index_endpoint (str): - Immutable. A resource name of the - IndexEndpoint. - deployed_index_id (str): - Immutable. The ID of the DeployedIndex in the - above IndexEndpoint. 
- """ - - index_endpoint = proto.Field( - proto.STRING, - number=1, - ) - deployed_index_id = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/deployed_model_ref.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/deployed_model_ref.py deleted file mode 100644 index 64bee94185..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/deployed_model_ref.py +++ /dev/null @@ -1,48 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'DeployedModelRef', - }, -) - - -class DeployedModelRef(proto.Message): - r"""Points to a DeployedModel. - - Attributes: - endpoint (str): - Immutable. A resource name of an Endpoint. - deployed_model_id (str): - Immutable. An ID of a DeployedModel in the - above Endpoint. 
- """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - deployed_model_id = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/encryption_spec.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/encryption_spec.py deleted file mode 100644 index 3eda5aeb6d..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/encryption_spec.py +++ /dev/null @@ -1,47 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'EncryptionSpec', - }, -) - - -class EncryptionSpec(proto.Message): - r"""Represents a customer-managed encryption key spec that can be - applied to a top-level resource. - - Attributes: - kms_key_name (str): - Required. The Cloud KMS resource identifier of the customer - managed encryption key used to protect a resource. Has the - form: - ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. - The key needs to be in the same region as where the compute - resource is created. 
- """ - - kms_key_name = proto.Field( - proto.STRING, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint.py deleted file mode 100644 index ec92d83792..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint.py +++ /dev/null @@ -1,372 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1.types import explanation -from google.cloud.aiplatform_v1.types import machine_resources -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'Endpoint', - 'DeployedModel', - 'PrivateEndpoints', - }, -) - - -class Endpoint(proto.Message): - r"""Models are deployed into it, and afterwards Endpoint is - called to obtain predictions and explanations. - - Attributes: - name (str): - Output only. The resource name of the - Endpoint. - display_name (str): - Required. The display name of the Endpoint. - The name can be up to 128 characters long and - can be consist of any UTF-8 characters. - description (str): - The description of the Endpoint. 
- deployed_models (Sequence[google.cloud.aiplatform_v1.types.DeployedModel]): - Output only. The models deployed in this Endpoint. To add or - remove DeployedModels use - [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel] - and - [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel] - respectively. - traffic_split (Sequence[google.cloud.aiplatform_v1.types.Endpoint.TrafficSplitEntry]): - A map from a DeployedModel's ID to the - percentage of this Endpoint's traffic that - should be forwarded to that DeployedModel. - If a DeployedModel's ID is not listed in this - map, then it receives no traffic. - - The traffic percentage values must add up to - 100, or map must be empty if the Endpoint is to - not accept any traffic at a moment. - etag (str): - Used to perform consistent read-modify-write - updates. If not set, a blind "overwrite" update - happens. - labels (Sequence[google.cloud.aiplatform_v1.types.Endpoint.LabelsEntry]): - The labels with user-defined metadata to - organize your Endpoints. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Endpoint was - created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Endpoint was - last updated. - encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): - Customer-managed encryption key spec for an - Endpoint. If set, this Endpoint and all sub- - resources of this Endpoint will be secured by - this key. - network (str): - The full name of the Google Compute Engine - `network `__ - to which the Endpoint should be peered. 
- - Private services access must already be configured for the - network. If left unspecified, the Endpoint is not peered - with any network. - - Only one of the fields, - [network][google.cloud.aiplatform.v1.Endpoint.network] or - [enable_private_service_connect][google.cloud.aiplatform.v1.Endpoint.enable_private_service_connect], - can be set. - - `Format `__: - ``projects/{project}/global/networks/{network}``. Where - ``{project}`` is a project number, as in ``12345``, and - ``{network}`` is network name. - enable_private_service_connect (bool): - If true, expose the Endpoint via private service connect. - - Only one of the fields, - [network][google.cloud.aiplatform.v1.Endpoint.network] or - [enable_private_service_connect][google.cloud.aiplatform.v1.Endpoint.enable_private_service_connect], - can be set. - model_deployment_monitoring_job (str): - Output only. Resource name of the Model Monitoring job - associated with this Endpoint if monitoring is enabled by - [CreateModelDeploymentMonitoringJob][]. 
Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=3, - ) - deployed_models = proto.RepeatedField( - proto.MESSAGE, - number=4, - message='DeployedModel', - ) - traffic_split = proto.MapField( - proto.STRING, - proto.INT32, - number=5, - ) - etag = proto.Field( - proto.STRING, - number=6, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=7, - ) - create_time = proto.Field( - proto.MESSAGE, - number=8, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=9, - message=timestamp_pb2.Timestamp, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=10, - message=gca_encryption_spec.EncryptionSpec, - ) - network = proto.Field( - proto.STRING, - number=13, - ) - enable_private_service_connect = proto.Field( - proto.BOOL, - number=17, - ) - model_deployment_monitoring_job = proto.Field( - proto.STRING, - number=14, - ) - - -class DeployedModel(proto.Message): - r"""A deployment of a Model. Endpoints contain one or more - DeployedModels. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - dedicated_resources (google.cloud.aiplatform_v1.types.DedicatedResources): - A description of resources that are dedicated - to the DeployedModel, and that need a higher - degree of manual configuration. - - This field is a member of `oneof`_ ``prediction_resources``. 
- automatic_resources (google.cloud.aiplatform_v1.types.AutomaticResources): - A description of resources that to large - degree are decided by Vertex AI, and require - only a modest additional configuration. - - This field is a member of `oneof`_ ``prediction_resources``. - id (str): - Immutable. The ID of the DeployedModel. If not provided upon - deployment, Vertex AI will generate a value for this ID. - - This value should be 1-10 characters, and valid characters - are /[0-9]/. - model (str): - Required. The name of the Model that this is - the deployment of. Note that the Model may be in - a different location than the DeployedModel's - Endpoint. - display_name (str): - The display name of the DeployedModel. If not provided upon - creation, the Model's display_name is used. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when the DeployedModel - was created. - explanation_spec (google.cloud.aiplatform_v1.types.ExplanationSpec): - Explanation configuration for this DeployedModel. - - When deploying a Model using - [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel], - this value overrides the value of - [Model.explanation_spec][google.cloud.aiplatform.v1.Model.explanation_spec]. - All fields of - [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] - are optional in the request. If a field of - [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] - is not populated, the value of the same field of - [Model.explanation_spec][google.cloud.aiplatform.v1.Model.explanation_spec] - is inherited. If the corresponding - [Model.explanation_spec][google.cloud.aiplatform.v1.Model.explanation_spec] - is not populated, all fields of the - [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] - will be used for the explanation configuration. - service_account (str): - The service account that the DeployedModel's container runs - as. 
Specify the email address of the service account. If - this service account is not specified, the container runs as - a service account that doesn't have access to the resource - project. - - Users deploying the Model must have the - ``iam.serviceAccounts.actAs`` permission on this service - account. - disable_container_logging (bool): - For custom-trained Models and AutoML Tabular Models, the - container of the DeployedModel instances will send - ``stderr`` and ``stdout`` streams to Stackdriver Logging by - default. Please note that the logs incur cost, which are - subject to `Cloud Logging - pricing `__. - - User can disable container logging by setting this flag to - true. - enable_access_logging (bool): - These logs are like standard server access - logs, containing information like timestamp and - latency for each prediction request. - Note that Stackdriver logs may incur a cost, - especially if your project receives prediction - requests at a high queries per second rate - (QPS). Estimate your costs before enabling this - option. - private_endpoints (google.cloud.aiplatform_v1.types.PrivateEndpoints): - Output only. Provide paths for users to send - predict/explain/health requests directly to the deployed - model services running on Cloud via private services access. - This field is populated if - [network][google.cloud.aiplatform.v1.Endpoint.network] is - configured. 
- """ - - dedicated_resources = proto.Field( - proto.MESSAGE, - number=7, - oneof='prediction_resources', - message=machine_resources.DedicatedResources, - ) - automatic_resources = proto.Field( - proto.MESSAGE, - number=8, - oneof='prediction_resources', - message=machine_resources.AutomaticResources, - ) - id = proto.Field( - proto.STRING, - number=1, - ) - model = proto.Field( - proto.STRING, - number=2, - ) - display_name = proto.Field( - proto.STRING, - number=3, - ) - create_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - explanation_spec = proto.Field( - proto.MESSAGE, - number=9, - message=explanation.ExplanationSpec, - ) - service_account = proto.Field( - proto.STRING, - number=11, - ) - disable_container_logging = proto.Field( - proto.BOOL, - number=15, - ) - enable_access_logging = proto.Field( - proto.BOOL, - number=13, - ) - private_endpoints = proto.Field( - proto.MESSAGE, - number=14, - message='PrivateEndpoints', - ) - - -class PrivateEndpoints(proto.Message): - r"""PrivateEndpoints proto is used to provide paths for users to send - requests privately. To send request via private service access, use - predict_http_uri, explain_http_uri or health_http_uri. To send - request via private service connect, use service_attachment. - - Attributes: - predict_http_uri (str): - Output only. Http(s) path to send prediction - requests. - explain_http_uri (str): - Output only. Http(s) path to send explain - requests. - health_http_uri (str): - Output only. Http(s) path to send health - check requests. - service_attachment (str): - Output only. The name of the service - attachment resource. Populated if private - service connect is enabled. 
- """ - - predict_http_uri = proto.Field( - proto.STRING, - number=1, - ) - explain_http_uri = proto.Field( - proto.STRING, - number=2, - ) - health_http_uri = proto.Field( - proto.STRING, - number=3, - ) - service_attachment = proto.Field( - proto.STRING, - number=4, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint_service.py deleted file mode 100644 index d9ad0501b6..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/endpoint_service.py +++ /dev/null @@ -1,409 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint -from google.cloud.aiplatform_v1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'CreateEndpointRequest', - 'CreateEndpointOperationMetadata', - 'GetEndpointRequest', - 'ListEndpointsRequest', - 'ListEndpointsResponse', - 'UpdateEndpointRequest', - 'DeleteEndpointRequest', - 'DeployModelRequest', - 'DeployModelResponse', - 'DeployModelOperationMetadata', - 'UndeployModelRequest', - 'UndeployModelResponse', - 'UndeployModelOperationMetadata', - }, -) - - -class CreateEndpointRequest(proto.Message): - r"""Request message for - [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1.EndpointService.CreateEndpoint]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - Endpoint in. Format: - ``projects/{project}/locations/{location}`` - endpoint (google.cloud.aiplatform_v1.types.Endpoint): - Required. The Endpoint to create. - endpoint_id (str): - Immutable. The ID to use for endpoint, which will become the - final component of the endpoint resource name. If not - provided, Vertex AI will generate a value for this ID. - - This value should be 1-10 characters, and valid characters - are /[0-9]/. When using HTTP/JSON, this field is populated - based on a query string argument, such as - ``?endpoint_id=12345``. This is the fallback for fields that - are not included in either the URI or the body. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - endpoint = proto.Field( - proto.MESSAGE, - number=2, - message=gca_endpoint.Endpoint, - ) - endpoint_id = proto.Field( - proto.STRING, - number=4, - ) - - -class CreateEndpointOperationMetadata(proto.Message): - r"""Runtime operation information for - [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1.EndpointService.CreateEndpoint]. 
- - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The operation generic information. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class GetEndpointRequest(proto.Message): - r"""Request message for - [EndpointService.GetEndpoint][google.cloud.aiplatform.v1.EndpointService.GetEndpoint] - - Attributes: - name (str): - Required. The name of the Endpoint resource. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListEndpointsRequest(proto.Message): - r"""Request message for - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints]. - - Attributes: - parent (str): - Required. The resource name of the Location from which to - list the Endpoints. Format: - ``projects/{project}/locations/{location}`` - filter (str): - Optional. An expression for filtering the results of the - request. For field names both snake_case and camelCase are - supported. - - - ``endpoint`` supports = and !=. ``endpoint`` represents - the Endpoint ID, i.e. the last segment of the Endpoint's - [resource - name][google.cloud.aiplatform.v1.Endpoint.name]. - - ``display_name`` supports = and, != - - ``labels`` supports general map functions that is: - - - ``labels.key=value`` - key:value equality - - \`labels.key:\* or labels:key - key existence - - A key including a space must be quoted. - ``labels."a key"``. - - Some examples: - - - ``endpoint=1`` - - ``displayName="myDisplayName"`` - - ``labels.myKey="myValue"`` - page_size (int): - Optional. The standard list page size. - page_token (str): - Optional. The standard list page token. 
Typically obtained - via - [ListEndpointsResponse.next_page_token][google.cloud.aiplatform.v1.ListEndpointsResponse.next_page_token] - of the previous - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Optional. Mask specifying which fields to - read. - order_by (str): - A comma-separated list of fields to order by, sorted in - ascending order. Use "desc" after a field name for - descending. Supported fields: - - - ``display_name`` - - ``create_time`` - - ``update_time`` - - Example: ``display_name, create_time desc``. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - order_by = proto.Field( - proto.STRING, - number=6, - ) - - -class ListEndpointsResponse(proto.Message): - r"""Response message for - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1.EndpointService.ListEndpoints]. - - Attributes: - endpoints (Sequence[google.cloud.aiplatform_v1.types.Endpoint]): - List of Endpoints in the requested page. - next_page_token (str): - A token to retrieve the next page of results. Pass to - [ListEndpointsRequest.page_token][google.cloud.aiplatform.v1.ListEndpointsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - endpoints = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_endpoint.Endpoint, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateEndpointRequest(proto.Message): - r"""Request message for - [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint]. - - Attributes: - endpoint (google.cloud.aiplatform_v1.types.Endpoint): - Required. 
The Endpoint which replaces the - resource on the server. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. See - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - """ - - endpoint = proto.Field( - proto.MESSAGE, - number=1, - message=gca_endpoint.Endpoint, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeleteEndpointRequest(proto.Message): - r"""Request message for - [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1.EndpointService.DeleteEndpoint]. - - Attributes: - name (str): - Required. The name of the Endpoint resource to be deleted. - Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class DeployModelRequest(proto.Message): - r"""Request message for - [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel]. - - Attributes: - endpoint (str): - Required. The name of the Endpoint resource into which to - deploy a Model. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - deployed_model (google.cloud.aiplatform_v1.types.DeployedModel): - Required. The DeployedModel to be created within the - Endpoint. Note that - [Endpoint.traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] - must be updated for the DeployedModel to start receiving - traffic, either as part of this call, or via - [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint]. - traffic_split (Sequence[google.cloud.aiplatform_v1.types.DeployModelRequest.TrafficSplitEntry]): - A map from a DeployedModel's ID to the percentage of this - Endpoint's traffic that should be forwarded to that - DeployedModel. - - If this field is non-empty, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] - will be overwritten with it. 
To refer to the ID of the just - being deployed Model, a "0" should be used, and the actual - ID of the new DeployedModel will be filled in its place by - this method. The traffic percentage values must add up to - 100. - - If this field is empty, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] - is not updated. - """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - deployed_model = proto.Field( - proto.MESSAGE, - number=2, - message=gca_endpoint.DeployedModel, - ) - traffic_split = proto.MapField( - proto.STRING, - proto.INT32, - number=3, - ) - - -class DeployModelResponse(proto.Message): - r"""Response message for - [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel]. - - Attributes: - deployed_model (google.cloud.aiplatform_v1.types.DeployedModel): - The DeployedModel that had been deployed in - the Endpoint. - """ - - deployed_model = proto.Field( - proto.MESSAGE, - number=1, - message=gca_endpoint.DeployedModel, - ) - - -class DeployModelOperationMetadata(proto.Message): - r"""Runtime operation information for - [EndpointService.DeployModel][google.cloud.aiplatform.v1.EndpointService.DeployModel]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The operation generic information. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class UndeployModelRequest(proto.Message): - r"""Request message for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel]. - - Attributes: - endpoint (str): - Required. The name of the Endpoint resource from which to - undeploy a Model. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - deployed_model_id (str): - Required. The ID of the DeployedModel to be - undeployed from the Endpoint. 
- traffic_split (Sequence[google.cloud.aiplatform_v1.types.UndeployModelRequest.TrafficSplitEntry]): - If this field is provided, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] - will be overwritten with it. If last DeployedModel is being - undeployed from the Endpoint, the [Endpoint.traffic_split] - will always end up empty when this call returns. A - DeployedModel will be successfully undeployed only if it - doesn't have any traffic assigned to it when this method - executes, or if this field unassigns any traffic to it. - """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - deployed_model_id = proto.Field( - proto.STRING, - number=2, - ) - traffic_split = proto.MapField( - proto.STRING, - proto.INT32, - number=3, - ) - - -class UndeployModelResponse(proto.Message): - r"""Response message for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel]. - - """ - - -class UndeployModelOperationMetadata(proto.Message): - r"""Runtime operation information for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1.EndpointService.UndeployModel]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The operation generic information. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/entity_type.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/entity_type.py deleted file mode 100644 index 74fb75c5bc..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/entity_type.py +++ /dev/null @@ -1,102 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'EntityType', - }, -) - - -class EntityType(proto.Message): - r"""An entity type is a type of object in a system that needs to - be modeled and have stored information about. For example, - driver is an entity type, and driver0 is an instance of an - entity type driver. - - Attributes: - name (str): - Immutable. Name of the EntityType. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - The last part entity_type is assigned by the client. The - entity_type can be up to 64 characters long and can consist - only of ASCII Latin letters A-Z and a-z and underscore(_), - and ASCII digits 0-9 starting with a letter. The value will - be unique given a featurestore. - description (str): - Optional. Description of the EntityType. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this EntityType - was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this EntityType - was most recently updated. - labels (Sequence[google.cloud.aiplatform_v1.types.EntityType.LabelsEntry]): - Optional. The labels with user-defined - metadata to organize your EntityTypes. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. 
International characters - are allowed. - See https://goo.gl/xmQnxf for more information - on and examples of labels. No more than 64 user - labels can be associated with one EntityType - (System labels are excluded)." - System reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. - etag (str): - Optional. Used to perform a consistent read- - odify-write updates. If not set, a blind - "overwrite" update happens. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - description = proto.Field( - proto.STRING, - number=2, - ) - create_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=6, - ) - etag = proto.Field( - proto.STRING, - number=7, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/env_var.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/env_var.py deleted file mode 100644 index 956d93aff5..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/env_var.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'EnvVar', - }, -) - - -class EnvVar(proto.Message): - r"""Represents an environment variable present in a Container or - Python Module. - - Attributes: - name (str): - Required. Name of the environment variable. - Must be a valid C identifier. - value (str): - Required. Variables that reference a $(VAR_NAME) are - expanded using the previous defined environment variables in - the container and any service environment variables. If a - variable cannot be resolved, the reference in the input - string will be unchanged. The $(VAR_NAME) syntax can be - escaped with a double $$, ie: $$(VAR_NAME). Escaped - references will never be expanded, regardless of whether the - variable exists or not. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - value = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/event.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/event.py deleted file mode 100644 index 03197de00f..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/event.py +++ /dev/null @@ -1,93 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'Event', - }, -) - - -class Event(proto.Message): - r"""An edge describing the relationship between an Artifact and - an Execution in a lineage graph. - - Attributes: - artifact (str): - Required. The relative resource name of the - Artifact in the Event. - execution (str): - Output only. The relative resource name of - the Execution in the Event. - event_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time the Event occurred. - type_ (google.cloud.aiplatform_v1.types.Event.Type): - Required. The type of the Event. - labels (Sequence[google.cloud.aiplatform_v1.types.Event.LabelsEntry]): - The labels with user-defined metadata to - annotate Events. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. No more than 64 user labels can be - associated with one Event (System labels are - excluded). - - See https://goo.gl/xmQnxf for more information - and examples of labels. System reserved label - keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. - """ - class Type(proto.Enum): - r"""Describes whether an Event's Artifact is the Execution's - input or output. 
- """ - TYPE_UNSPECIFIED = 0 - INPUT = 1 - OUTPUT = 2 - - artifact = proto.Field( - proto.STRING, - number=1, - ) - execution = proto.Field( - proto.STRING, - number=2, - ) - event_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - type_ = proto.Field( - proto.ENUM, - number=4, - enum=Type, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=5, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/execution.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/execution.py deleted file mode 100644 index d0d4a6f6c7..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/execution.py +++ /dev/null @@ -1,149 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'Execution', - }, -) - - -class Execution(proto.Message): - r"""Instance of a general execution. - - Attributes: - name (str): - Output only. The resource name of the - Execution. - display_name (str): - User provided display name of the Execution. - May be up to 128 Unicode characters. - state (google.cloud.aiplatform_v1.types.Execution.State): - The state of this Execution. 
This is a - property of the Execution, and does not imply or - capture any ongoing process. This property is - managed by clients (such as Vertex AI Pipelines) - and the system does not prescribe or check the - validity of state transitions. - etag (str): - An eTag used to perform consistent read- - odify-write updates. If not set, a blind - "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1.types.Execution.LabelsEntry]): - The labels with user-defined metadata to - organize your Executions. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. No more than 64 user labels can be - associated with one Execution (System labels are - excluded). - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Execution - was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Execution - was last updated. - schema_title (str): - The title of the schema describing the - metadata. - Schema title and version is expected to be - registered in earlier Create Schema calls. And - both are used together as unique identifiers to - identify schemas within the local metadata - store. - schema_version (str): - The version of the schema in ``schema_title`` to use. - - Schema title and version is expected to be registered in - earlier Create Schema calls. And both are used together as - unique identifiers to identify schemas within the local - metadata store. - metadata (google.protobuf.struct_pb2.Struct): - Properties of the Execution. - The size of this field should not exceed 200KB. 
- description (str): - Description of the Execution - """ - class State(proto.Enum): - r"""Describes the state of the Execution.""" - STATE_UNSPECIFIED = 0 - NEW = 1 - RUNNING = 2 - COMPLETE = 3 - FAILED = 4 - CACHED = 5 - CANCELLED = 6 - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - state = proto.Field( - proto.ENUM, - number=6, - enum=State, - ) - etag = proto.Field( - proto.STRING, - number=9, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=10, - ) - create_time = proto.Field( - proto.MESSAGE, - number=11, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=12, - message=timestamp_pb2.Timestamp, - ) - schema_title = proto.Field( - proto.STRING, - number=13, - ) - schema_version = proto.Field( - proto.STRING, - number=14, - ) - metadata = proto.Field( - proto.MESSAGE, - number=15, - message=struct_pb2.Struct, - ) - description = proto.Field( - proto.STRING, - number=16, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/explanation.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/explanation.py deleted file mode 100644 index 1f0ca8addf..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/explanation.py +++ /dev/null @@ -1,719 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import explanation_metadata -from google.protobuf import struct_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'Explanation', - 'ModelExplanation', - 'Attribution', - 'ExplanationSpec', - 'ExplanationParameters', - 'SampledShapleyAttribution', - 'IntegratedGradientsAttribution', - 'XraiAttribution', - 'SmoothGradConfig', - 'FeatureNoiseSigma', - 'BlurBaselineConfig', - 'ExplanationSpecOverride', - 'ExplanationMetadataOverride', - }, -) - - -class Explanation(proto.Message): - r"""Explanation of a prediction (provided in - [PredictResponse.predictions][google.cloud.aiplatform.v1.PredictResponse.predictions]) - produced by the Model on a given - [instance][google.cloud.aiplatform.v1.ExplainRequest.instances]. - - Attributes: - attributions (Sequence[google.cloud.aiplatform_v1.types.Attribution]): - Output only. Feature attributions grouped by predicted - outputs. - - For Models that predict only one output, such as regression - Models that predict only one score, there is only one - attibution that explains the predicted output. For Models - that predict multiple outputs, such as multiclass Models - that predict multiple classes, each element explains one - specific item. - [Attribution.output_index][google.cloud.aiplatform.v1.Attribution.output_index] - can be used to identify which output this attribution is - explaining. - - If users set - [ExplanationParameters.top_k][google.cloud.aiplatform.v1.ExplanationParameters.top_k], - the attributions are sorted by - [instance_output_value][Attributions.instance_output_value] - in descending order. If - [ExplanationParameters.output_indices][google.cloud.aiplatform.v1.ExplanationParameters.output_indices] - is specified, the attributions are stored by - [Attribution.output_index][google.cloud.aiplatform.v1.Attribution.output_index] - in the same order as they appear in the output_indices. 
- """ - - attributions = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='Attribution', - ) - - -class ModelExplanation(proto.Message): - r"""Aggregated explanation metrics for a Model over a set of - instances. - - Attributes: - mean_attributions (Sequence[google.cloud.aiplatform_v1.types.Attribution]): - Output only. Aggregated attributions explaining the Model's - prediction outputs over the set of instances. The - attributions are grouped by outputs. - - For Models that predict only one output, such as regression - Models that predict only one score, there is only one - attibution that explains the predicted output. For Models - that predict multiple outputs, such as multiclass Models - that predict multiple classes, each element explains one - specific item. - [Attribution.output_index][google.cloud.aiplatform.v1.Attribution.output_index] - can be used to identify which output this attribution is - explaining. - - The - [baselineOutputValue][google.cloud.aiplatform.v1.Attribution.baseline_output_value], - [instanceOutputValue][google.cloud.aiplatform.v1.Attribution.instance_output_value] - and - [featureAttributions][google.cloud.aiplatform.v1.Attribution.feature_attributions] - fields are averaged over the test data. - - NOTE: Currently AutoML tabular classification Models produce - only one attribution, which averages attributions over all - the classes it predicts. - [Attribution.approximation_error][google.cloud.aiplatform.v1.Attribution.approximation_error] - is not populated. - """ - - mean_attributions = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='Attribution', - ) - - -class Attribution(proto.Message): - r"""Attribution that explains a particular prediction output. - - Attributes: - baseline_output_value (float): - Output only. 
Model predicted output if the input instance is - constructed from the baselines of all the features defined - in - [ExplanationMetadata.inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs]. - The field name of the output is determined by the key in - [ExplanationMetadata.outputs][google.cloud.aiplatform.v1.ExplanationMetadata.outputs]. - - If the Model's predicted output has multiple dimensions - (rank > 1), this is the value in the output located by - [output_index][google.cloud.aiplatform.v1.Attribution.output_index]. - - If there are multiple baselines, their output values are - averaged. - instance_output_value (float): - Output only. Model predicted output on the corresponding - [explanation instance][ExplainRequest.instances]. The field - name of the output is determined by the key in - [ExplanationMetadata.outputs][google.cloud.aiplatform.v1.ExplanationMetadata.outputs]. - - If the Model predicted output has multiple dimensions, this - is the value in the output located by - [output_index][google.cloud.aiplatform.v1.Attribution.output_index]. - feature_attributions (google.protobuf.struct_pb2.Value): - Output only. Attributions of each explained feature. - Features are extracted from the [prediction - instances][google.cloud.aiplatform.v1.ExplainRequest.instances] - according to [explanation metadata for - inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs]. - - The value is a struct, whose keys are the name of the - feature. The values are how much the feature in the - [instance][google.cloud.aiplatform.v1.ExplainRequest.instances] - contributed to the predicted result. - - The format of the value is determined by the feature's input - format: - - - If the feature is a scalar value, the attribution value - is a [floating - number][google.protobuf.Value.number_value]. - - - If the feature is an array of scalar values, the - attribution value is an - [array][google.protobuf.Value.list_value]. 
- - - If the feature is a struct, the attribution value is a - [struct][google.protobuf.Value.struct_value]. The keys in - the attribution value struct are the same as the keys in - the feature struct. The formats of the values in the - attribution struct are determined by the formats of the - values in the feature struct. - - The - [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri] - field, pointed to by the - [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] - field of the - [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] - object, points to the schema file that describes the - features and their attribution values (if it is populated). - output_index (Sequence[int]): - Output only. The index that locates the explained prediction - output. - - If the prediction output is a scalar value, output_index is - not populated. If the prediction output has multiple - dimensions, the length of the output_index list is the same - as the number of dimensions of the output. The i-th element - in output_index is the element index of the i-th dimension - of the output vector. Indices start from 0. - output_display_name (str): - Output only. The display name of the output identified by - [output_index][google.cloud.aiplatform.v1.Attribution.output_index]. - For example, the predicted class name by a - multi-classification Model. - - This field is only populated iff the Model predicts display - names as a separate field along with the explained output. - The predicted display name must has the same shape of the - explained output, and can be located using output_index. - approximation_error (float): - Output only. Error of - [feature_attributions][google.cloud.aiplatform.v1.Attribution.feature_attributions] - caused by approximation used in the explanation method. - Lower value means more precise attributions. 
- - - For Sampled Shapley - [attribution][google.cloud.aiplatform.v1.ExplanationParameters.sampled_shapley_attribution], - increasing - [path_count][google.cloud.aiplatform.v1.SampledShapleyAttribution.path_count] - might reduce the error. - - For Integrated Gradients - [attribution][google.cloud.aiplatform.v1.ExplanationParameters.integrated_gradients_attribution], - increasing - [step_count][google.cloud.aiplatform.v1.IntegratedGradientsAttribution.step_count] - might reduce the error. - - For [XRAI - attribution][google.cloud.aiplatform.v1.ExplanationParameters.xrai_attribution], - increasing - [step_count][google.cloud.aiplatform.v1.XraiAttribution.step_count] - might reduce the error. - - See `this - introduction `__ - for more information. - output_name (str): - Output only. Name of the explain output. Specified as the - key in - [ExplanationMetadata.outputs][google.cloud.aiplatform.v1.ExplanationMetadata.outputs]. - """ - - baseline_output_value = proto.Field( - proto.DOUBLE, - number=1, - ) - instance_output_value = proto.Field( - proto.DOUBLE, - number=2, - ) - feature_attributions = proto.Field( - proto.MESSAGE, - number=3, - message=struct_pb2.Value, - ) - output_index = proto.RepeatedField( - proto.INT32, - number=4, - ) - output_display_name = proto.Field( - proto.STRING, - number=5, - ) - approximation_error = proto.Field( - proto.DOUBLE, - number=6, - ) - output_name = proto.Field( - proto.STRING, - number=7, - ) - - -class ExplanationSpec(proto.Message): - r"""Specification of Model explanation. - - Attributes: - parameters (google.cloud.aiplatform_v1.types.ExplanationParameters): - Required. Parameters that configure - explaining of the Model's predictions. - metadata (google.cloud.aiplatform_v1.types.ExplanationMetadata): - Required. Metadata describing the Model's - input and output for explanation. 
- """ - - parameters = proto.Field( - proto.MESSAGE, - number=1, - message='ExplanationParameters', - ) - metadata = proto.Field( - proto.MESSAGE, - number=2, - message=explanation_metadata.ExplanationMetadata, - ) - - -class ExplanationParameters(proto.Message): - r"""Parameters to configure explaining for Model's predictions. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - sampled_shapley_attribution (google.cloud.aiplatform_v1.types.SampledShapleyAttribution): - An attribution method that approximates - Shapley values for features that contribute to - the label being predicted. A sampling strategy - is used to approximate the value rather than - considering all subsets of features. Refer to - this paper for model details: - https://arxiv.org/abs/1306.4265. - - This field is a member of `oneof`_ ``method``. - integrated_gradients_attribution (google.cloud.aiplatform_v1.types.IntegratedGradientsAttribution): - An attribution method that computes Aumann- - hapley values taking advantage of the model's - fully differentiable structure. Refer to this - paper for more details: - https://arxiv.org/abs/1703.01365 - - This field is a member of `oneof`_ ``method``. - xrai_attribution (google.cloud.aiplatform_v1.types.XraiAttribution): - An attribution method that redistributes - Integrated Gradients attribution to segmented - regions, taking advantage of the model's fully - differentiable structure. Refer to this paper - for more details: - https://arxiv.org/abs/1906.02825 - XRAI currently performs better on natural - images, like a picture of a house or an animal. 
- If the images are taken in artificial - environments, like a lab or manufacturing line, - or from diagnostic equipment, like x-rays or - quality-control cameras, use Integrated - Gradients instead. - - This field is a member of `oneof`_ ``method``. - top_k (int): - If populated, returns attributions for top K - indices of outputs (defaults to 1). Only applies - to Models that predicts more than one outputs - (e,g, multi-class Models). When set to -1, - returns explanations for all outputs. - output_indices (google.protobuf.struct_pb2.ListValue): - If populated, only returns attributions that have - [output_index][google.cloud.aiplatform.v1.Attribution.output_index] - contained in output_indices. It must be an ndarray of - integers, with the same shape of the output it's explaining. - - If not populated, returns attributions for - [top_k][google.cloud.aiplatform.v1.ExplanationParameters.top_k] - indices of outputs. If neither top_k nor output_indeices is - populated, returns the argmax index of the outputs. - - Only applicable to Models that predict multiple outputs - (e,g, multi-class Models that predict multiple classes). - """ - - sampled_shapley_attribution = proto.Field( - proto.MESSAGE, - number=1, - oneof='method', - message='SampledShapleyAttribution', - ) - integrated_gradients_attribution = proto.Field( - proto.MESSAGE, - number=2, - oneof='method', - message='IntegratedGradientsAttribution', - ) - xrai_attribution = proto.Field( - proto.MESSAGE, - number=3, - oneof='method', - message='XraiAttribution', - ) - top_k = proto.Field( - proto.INT32, - number=4, - ) - output_indices = proto.Field( - proto.MESSAGE, - number=5, - message=struct_pb2.ListValue, - ) - - -class SampledShapleyAttribution(proto.Message): - r"""An attribution method that approximates Shapley values for - features that contribute to the label being predicted. A - sampling strategy is used to approximate the value rather than - considering all subsets of features. 
- - Attributes: - path_count (int): - Required. The number of feature permutations to consider - when approximating the Shapley values. - - Valid range of its value is [1, 50], inclusively. - """ - - path_count = proto.Field( - proto.INT32, - number=1, - ) - - -class IntegratedGradientsAttribution(proto.Message): - r"""An attribution method that computes the Aumann-Shapley value - taking advantage of the model's fully differentiable structure. - Refer to this paper for more details: - https://arxiv.org/abs/1703.01365 - - Attributes: - step_count (int): - Required. The number of steps for approximating the path - integral. A good value to start is 50 and gradually increase - until the sum to diff property is within the desired error - range. - - Valid range of its value is [1, 100], inclusively. - smooth_grad_config (google.cloud.aiplatform_v1.types.SmoothGradConfig): - Config for SmoothGrad approximation of - gradients. - When enabled, the gradients are approximated by - averaging the gradients from noisy samples in - the vicinity of the inputs. Adding noise can - help improve the computed gradients. Refer to - this paper for more details: - https://arxiv.org/pdf/1706.03825.pdf - blur_baseline_config (google.cloud.aiplatform_v1.types.BlurBaselineConfig): - Config for IG with blur baseline. - When enabled, a linear path from the maximally - blurred image to the input image is created. 
- Using a blurred baseline instead of zero (black - image) is motivated by the BlurIG approach - explained here: https://arxiv.org/abs/2004.03383 - """ - - step_count = proto.Field( - proto.INT32, - number=1, - ) - smooth_grad_config = proto.Field( - proto.MESSAGE, - number=2, - message='SmoothGradConfig', - ) - blur_baseline_config = proto.Field( - proto.MESSAGE, - number=3, - message='BlurBaselineConfig', - ) - - -class XraiAttribution(proto.Message): - r"""An explanation method that redistributes Integrated Gradients - attributions to segmented regions, taking advantage of the - model's fully differentiable structure. Refer to this paper for - more details: https://arxiv.org/abs/1906.02825 - - Supported only by image Models. - - Attributes: - step_count (int): - Required. The number of steps for approximating the path - integral. A good value to start is 50 and gradually increase - until the sum to diff property is met within the desired - error range. - - Valid range of its value is [1, 100], inclusively. - smooth_grad_config (google.cloud.aiplatform_v1.types.SmoothGradConfig): - Config for SmoothGrad approximation of - gradients. - When enabled, the gradients are approximated by - averaging the gradients from noisy samples in - the vicinity of the inputs. Adding noise can - help improve the computed gradients. Refer to - this paper for more details: - https://arxiv.org/pdf/1706.03825.pdf - blur_baseline_config (google.cloud.aiplatform_v1.types.BlurBaselineConfig): - Config for XRAI with blur baseline. - When enabled, a linear path from the maximally - blurred image to the input image is created. 
- Using a blurred baseline instead of zero (black - image) is motivated by the BlurIG approach - explained here: https://arxiv.org/abs/2004.03383 - """ - - step_count = proto.Field( - proto.INT32, - number=1, - ) - smooth_grad_config = proto.Field( - proto.MESSAGE, - number=2, - message='SmoothGradConfig', - ) - blur_baseline_config = proto.Field( - proto.MESSAGE, - number=3, - message='BlurBaselineConfig', - ) - - -class SmoothGradConfig(proto.Message): - r"""Config for SmoothGrad approximation of gradients. - When enabled, the gradients are approximated by averaging the - gradients from noisy samples in the vicinity of the inputs. - Adding noise can help improve the computed gradients. Refer to - this paper for more details: - https://arxiv.org/pdf/1706.03825.pdf - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - noise_sigma (float): - This is a single float value and will be used to add noise - to all the features. Use this field when all features are - normalized to have the same distribution: scale to range [0, - 1], [-1, 1] or z-scoring, where features are normalized to - have 0-mean and 1-variance. Learn more about - `normalization `__. - - For best results the recommended value is about 10% - 20% of - the standard deviation of the input feature. Refer to - section 3.2 of the SmoothGrad paper: - https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. - - If the distribution is different per feature, set - [feature_noise_sigma][google.cloud.aiplatform.v1.SmoothGradConfig.feature_noise_sigma] - instead for each feature. - - This field is a member of `oneof`_ ``GradientNoiseSigma``. 
- feature_noise_sigma (google.cloud.aiplatform_v1.types.FeatureNoiseSigma): - This is similar to - [noise_sigma][google.cloud.aiplatform.v1.SmoothGradConfig.noise_sigma], - but provides additional flexibility. A separate noise sigma - can be provided for each feature, which is useful if their - distributions are different. No noise is added to features - that are not set. If this field is unset, - [noise_sigma][google.cloud.aiplatform.v1.SmoothGradConfig.noise_sigma] - will be used for all features. - - This field is a member of `oneof`_ ``GradientNoiseSigma``. - noisy_sample_count (int): - The number of gradient samples to use for approximation. The - higher this number, the more accurate the gradient is, but - the runtime complexity increases by this factor as well. - Valid range of its value is [1, 50]. Defaults to 3. - """ - - noise_sigma = proto.Field( - proto.FLOAT, - number=1, - oneof='GradientNoiseSigma', - ) - feature_noise_sigma = proto.Field( - proto.MESSAGE, - number=2, - oneof='GradientNoiseSigma', - message='FeatureNoiseSigma', - ) - noisy_sample_count = proto.Field( - proto.INT32, - number=3, - ) - - -class FeatureNoiseSigma(proto.Message): - r"""Noise sigma by features. Noise sigma represents the standard - deviation of the gaussian kernel that will be used to add noise - to interpolated inputs prior to computing gradients. - - Attributes: - noise_sigma (Sequence[google.cloud.aiplatform_v1.types.FeatureNoiseSigma.NoiseSigmaForFeature]): - Noise sigma per feature. No noise is added to - features that are not set. - """ - - class NoiseSigmaForFeature(proto.Message): - r"""Noise sigma for a single feature. - - Attributes: - name (str): - The name of the input feature for which noise sigma is - provided. The features are defined in [explanation metadata - inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs]. 
- sigma (float): - This represents the standard deviation of the Gaussian - kernel that will be used to add noise to the feature prior - to computing gradients. Similar to - [noise_sigma][google.cloud.aiplatform.v1.SmoothGradConfig.noise_sigma] - but represents the noise added to the current feature. - Defaults to 0.1. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - sigma = proto.Field( - proto.FLOAT, - number=2, - ) - - noise_sigma = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=NoiseSigmaForFeature, - ) - - -class BlurBaselineConfig(proto.Message): - r"""Config for blur baseline. - When enabled, a linear path from the maximally blurred image to - the input image is created. Using a blurred baseline instead of - zero (black image) is motivated by the BlurIG approach explained - here: - https://arxiv.org/abs/2004.03383 - - Attributes: - max_blur_sigma (float): - The standard deviation of the blur kernel for - the blurred baseline. The same blurring - parameter is used for both the height and the - width dimension. If not set, the method defaults - to the zero (i.e. black for images) baseline. - """ - - max_blur_sigma = proto.Field( - proto.FLOAT, - number=1, - ) - - -class ExplanationSpecOverride(proto.Message): - r"""The [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] - entries that can be overridden at [online - explanation][google.cloud.aiplatform.v1.PredictionService.Explain] - time. - - Attributes: - parameters (google.cloud.aiplatform_v1.types.ExplanationParameters): - The parameters to be overridden. Note that the - [method][google.cloud.aiplatform.v1.ExplanationParameters.method] - cannot be changed. If not specified, no parameter is - overridden. - metadata (google.cloud.aiplatform_v1.types.ExplanationMetadataOverride): - The metadata to be overridden. If not - specified, no metadata is overridden. 
- """ - - parameters = proto.Field( - proto.MESSAGE, - number=1, - message='ExplanationParameters', - ) - metadata = proto.Field( - proto.MESSAGE, - number=2, - message='ExplanationMetadataOverride', - ) - - -class ExplanationMetadataOverride(proto.Message): - r"""The - [ExplanationMetadata][google.cloud.aiplatform.v1.ExplanationMetadata] - entries that can be overridden at [online - explanation][google.cloud.aiplatform.v1.PredictionService.Explain] - time. - - Attributes: - inputs (Sequence[google.cloud.aiplatform_v1.types.ExplanationMetadataOverride.InputsEntry]): - Required. Overrides the [input - metadata][google.cloud.aiplatform.v1.ExplanationMetadata.inputs] - of the features. The key is the name of the feature to be - overridden. The keys specified here must exist in the input - metadata to be overridden. If a feature is not specified - here, the corresponding feature's input metadata is not - overridden. - """ - - class InputMetadataOverride(proto.Message): - r"""The [input - metadata][google.cloud.aiplatform.v1.ExplanationMetadata.InputMetadata] - entries to be overridden. - - Attributes: - input_baselines (Sequence[google.protobuf.struct_pb2.Value]): - Baseline inputs for this feature. - - This overrides the ``input_baseline`` field of the - [ExplanationMetadata.InputMetadata][google.cloud.aiplatform.v1.ExplanationMetadata.InputMetadata] - object of the corresponding feature's input metadata. If - it's not specified, the original baselines are not - overridden. 
- """ - - input_baselines = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=struct_pb2.Value, - ) - - inputs = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=1, - message=InputMetadataOverride, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/explanation_metadata.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/explanation_metadata.py deleted file mode 100644 index ff88118702..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/explanation_metadata.py +++ /dev/null @@ -1,460 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import struct_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'ExplanationMetadata', - }, -) - - -class ExplanationMetadata(proto.Message): - r"""Metadata describing the Model's input and output for - explanation. - - Attributes: - inputs (Sequence[google.cloud.aiplatform_v1.types.ExplanationMetadata.InputsEntry]): - Required. Map from feature names to feature input metadata. - Keys are the name of the features. Values are the - specification of the feature. - - An empty InputMetadata is valid. It describes a text feature - which has the name specified as the key in - [ExplanationMetadata.inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs]. 
- The baseline of the empty feature is chosen by Vertex AI. - - For Vertex AI-provided Tensorflow images, the key can be any - friendly name of the feature. Once specified, - [featureAttributions][google.cloud.aiplatform.v1.Attribution.feature_attributions] - are keyed by this key (if not grouped with another feature). - - For custom images, the key must match with the key in - [instance][google.cloud.aiplatform.v1.ExplainRequest.instances]. - outputs (Sequence[google.cloud.aiplatform_v1.types.ExplanationMetadata.OutputsEntry]): - Required. Map from output names to output - metadata. - For Vertex AI-provided Tensorflow images, keys - can be any user defined string that consists of - any UTF-8 characters. - For custom images, keys are the name of the - output field in the prediction to be explained. - - Currently only one key is allowed. - feature_attributions_schema_uri (str): - Points to a YAML file stored on Google Cloud Storage - describing the format of the [feature - attributions][google.cloud.aiplatform.v1.Attribution.feature_attributions]. - The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. - AutoML tabular Models always have this field populated by - Vertex AI. Note: The URI given on output may be different, - including the URI scheme, than the one given on input. The - output URI will point to a location where the user only has - a read access. - """ - - class InputMetadata(proto.Message): - r"""Metadata of the input of a feature. - - Fields other than - [InputMetadata.input_baselines][google.cloud.aiplatform.v1.ExplanationMetadata.InputMetadata.input_baselines] - are applicable only for Models that are using Vertex AI-provided - images for Tensorflow. - - Attributes: - input_baselines (Sequence[google.protobuf.struct_pb2.Value]): - Baseline inputs for this feature. - - If no baseline is specified, Vertex AI chooses the baseline - for this feature. 
If multiple baselines are specified, - Vertex AI returns the average attributions across them in - [Attribution.feature_attributions][google.cloud.aiplatform.v1.Attribution.feature_attributions]. - - For Vertex AI-provided Tensorflow images (both 1.x and 2.x), - the shape of each baseline must match the shape of the input - tensor. If a scalar is provided, we broadcast to the same - shape as the input tensor. - - For custom images, the element of the baselines must be in - the same format as the feature's input in the - [instance][google.cloud.aiplatform.v1.ExplainRequest.instances][]. - The schema of any single instance may be specified via - Endpoint's DeployedModels' - [Model's][google.cloud.aiplatform.v1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri]. - input_tensor_name (str): - Name of the input tensor for this feature. - Required and is only applicable to Vertex AI- - provided images for Tensorflow. - encoding (google.cloud.aiplatform_v1.types.ExplanationMetadata.InputMetadata.Encoding): - Defines how the feature is encoded into the - input tensor. Defaults to IDENTITY. - modality (str): - Modality of the feature. Valid values are: - numeric, image. Defaults to numeric. - feature_value_domain (google.cloud.aiplatform_v1.types.ExplanationMetadata.InputMetadata.FeatureValueDomain): - The domain details of the input feature - value. Like min/max, original mean or standard - deviation if normalized. - indices_tensor_name (str): - Specifies the index of the values of the input tensor. - Required when the input tensor is a sparse representation. - Refer to Tensorflow documentation for more details: - https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor. - dense_shape_tensor_name (str): - Specifies the shape of the values of the input if the input - is a sparse representation. 
Refer to Tensorflow - documentation for more details: - https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor. - index_feature_mapping (Sequence[str]): - A list of feature names for each index in the input tensor. - Required when the input - [InputMetadata.encoding][google.cloud.aiplatform.v1.ExplanationMetadata.InputMetadata.encoding] - is BAG_OF_FEATURES, BAG_OF_FEATURES_SPARSE, INDICATOR. - encoded_tensor_name (str): - Encoded tensor is a transformation of the input tensor. Must - be provided if choosing [Integrated Gradients - attribution][google.cloud.aiplatform.v1.ExplanationParameters.integrated_gradients_attribution] - or [XRAI - attribution][google.cloud.aiplatform.v1.ExplanationParameters.xrai_attribution] - and the input tensor is not differentiable. - - An encoded tensor is generated if the input tensor is - encoded by a lookup table. - encoded_baselines (Sequence[google.protobuf.struct_pb2.Value]): - A list of baselines for the encoded tensor. - The shape of each baseline should match the - shape of the encoded tensor. If a scalar is - provided, Vertex AI broadcasts to the same shape - as the encoded tensor. - visualization (google.cloud.aiplatform_v1.types.ExplanationMetadata.InputMetadata.Visualization): - Visualization configurations for image - explanation. - group_name (str): - Name of the group that the input belongs to. Features with - the same group name will be treated as one feature when - computing attributions. Features grouped together can have - different shapes in value. If provided, there will be one - single attribution generated in - [Attribution.feature_attributions][google.cloud.aiplatform.v1.Attribution.feature_attributions], - keyed by the group name. - """ - class Encoding(proto.Enum): - r"""Defines how a feature is encoded. 
Defaults to IDENTITY.""" - ENCODING_UNSPECIFIED = 0 - IDENTITY = 1 - BAG_OF_FEATURES = 2 - BAG_OF_FEATURES_SPARSE = 3 - INDICATOR = 4 - COMBINED_EMBEDDING = 5 - CONCAT_EMBEDDING = 6 - - class FeatureValueDomain(proto.Message): - r"""Domain details of the input feature value. Provides numeric - information about the feature, such as its range (min, max). If the - feature has been pre-processed, for example with z-scoring, then it - provides information about how to recover the original feature. For - example, if the input feature is an image and it has been - pre-processed to obtain 0-mean and stddev = 1 values, then - original_mean, and original_stddev refer to the mean and stddev of - the original feature (e.g. image tensor) from which input feature - (with mean = 0 and stddev = 1) was obtained. - - Attributes: - min_value (float): - The minimum permissible value for this - feature. - max_value (float): - The maximum permissible value for this - feature. - original_mean (float): - If this input feature has been normalized to a mean value of - 0, the original_mean specifies the mean value of the domain - prior to normalization. - original_stddev (float): - If this input feature has been normalized to a standard - deviation of 1.0, the original_stddev specifies the standard - deviation of the domain prior to normalization. - """ - - min_value = proto.Field( - proto.FLOAT, - number=1, - ) - max_value = proto.Field( - proto.FLOAT, - number=2, - ) - original_mean = proto.Field( - proto.FLOAT, - number=3, - ) - original_stddev = proto.Field( - proto.FLOAT, - number=4, - ) - - class Visualization(proto.Message): - r"""Visualization configurations for image explanation. - - Attributes: - type_ (google.cloud.aiplatform_v1.types.ExplanationMetadata.InputMetadata.Visualization.Type): - Type of the image visualization. Only applicable to - [Integrated Gradients - attribution][google.cloud.aiplatform.v1.ExplanationParameters.integrated_gradients_attribution]. 
- OUTLINES shows regions of attribution, while PIXELS shows - per-pixel attribution. Defaults to OUTLINES. - polarity (google.cloud.aiplatform_v1.types.ExplanationMetadata.InputMetadata.Visualization.Polarity): - Whether to only highlight pixels with - positive contributions, negative or both. - Defaults to POSITIVE. - color_map (google.cloud.aiplatform_v1.types.ExplanationMetadata.InputMetadata.Visualization.ColorMap): - The color scheme used for the highlighted areas. - - Defaults to PINK_GREEN for [Integrated Gradients - attribution][google.cloud.aiplatform.v1.ExplanationParameters.integrated_gradients_attribution], - which shows positive attributions in green and negative in - pink. - - Defaults to VIRIDIS for [XRAI - attribution][google.cloud.aiplatform.v1.ExplanationParameters.xrai_attribution], - which highlights the most influential regions in yellow and - the least influential in blue. - clip_percent_upperbound (float): - Excludes attributions above the specified percentile from - the highlighted areas. Using the clip_percent_upperbound and - clip_percent_lowerbound together can be useful for filtering - out noise and making it easier to see areas of strong - attribution. Defaults to 99.9. - clip_percent_lowerbound (float): - Excludes attributions below the specified - percentile, from the highlighted areas. Defaults - to 62. - overlay_type (google.cloud.aiplatform_v1.types.ExplanationMetadata.InputMetadata.Visualization.OverlayType): - How the original image is displayed in the - visualization. Adjusting the overlay can help - increase visual clarity if the original image - makes it difficult to view the visualization. - Defaults to NONE. - """ - class Type(proto.Enum): - r"""Type of the image visualization. Only applicable to [Integrated - Gradients - attribution][google.cloud.aiplatform.v1.ExplanationParameters.integrated_gradients_attribution]. 
- """ - TYPE_UNSPECIFIED = 0 - PIXELS = 1 - OUTLINES = 2 - - class Polarity(proto.Enum): - r"""Whether to only highlight pixels with positive contributions, - negative or both. Defaults to POSITIVE. - """ - POLARITY_UNSPECIFIED = 0 - POSITIVE = 1 - NEGATIVE = 2 - BOTH = 3 - - class ColorMap(proto.Enum): - r"""The color scheme used for highlighting areas.""" - COLOR_MAP_UNSPECIFIED = 0 - PINK_GREEN = 1 - VIRIDIS = 2 - RED = 3 - GREEN = 4 - RED_GREEN = 6 - PINK_WHITE_GREEN = 5 - - class OverlayType(proto.Enum): - r"""How the original image is displayed in the visualization.""" - OVERLAY_TYPE_UNSPECIFIED = 0 - NONE = 1 - ORIGINAL = 2 - GRAYSCALE = 3 - MASK_BLACK = 4 - - type_ = proto.Field( - proto.ENUM, - number=1, - enum='ExplanationMetadata.InputMetadata.Visualization.Type', - ) - polarity = proto.Field( - proto.ENUM, - number=2, - enum='ExplanationMetadata.InputMetadata.Visualization.Polarity', - ) - color_map = proto.Field( - proto.ENUM, - number=3, - enum='ExplanationMetadata.InputMetadata.Visualization.ColorMap', - ) - clip_percent_upperbound = proto.Field( - proto.FLOAT, - number=4, - ) - clip_percent_lowerbound = proto.Field( - proto.FLOAT, - number=5, - ) - overlay_type = proto.Field( - proto.ENUM, - number=6, - enum='ExplanationMetadata.InputMetadata.Visualization.OverlayType', - ) - - input_baselines = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=struct_pb2.Value, - ) - input_tensor_name = proto.Field( - proto.STRING, - number=2, - ) - encoding = proto.Field( - proto.ENUM, - number=3, - enum='ExplanationMetadata.InputMetadata.Encoding', - ) - modality = proto.Field( - proto.STRING, - number=4, - ) - feature_value_domain = proto.Field( - proto.MESSAGE, - number=5, - message='ExplanationMetadata.InputMetadata.FeatureValueDomain', - ) - indices_tensor_name = proto.Field( - proto.STRING, - number=6, - ) - dense_shape_tensor_name = proto.Field( - proto.STRING, - number=7, - ) - index_feature_mapping = proto.RepeatedField( - proto.STRING, - 
number=8, - ) - encoded_tensor_name = proto.Field( - proto.STRING, - number=9, - ) - encoded_baselines = proto.RepeatedField( - proto.MESSAGE, - number=10, - message=struct_pb2.Value, - ) - visualization = proto.Field( - proto.MESSAGE, - number=11, - message='ExplanationMetadata.InputMetadata.Visualization', - ) - group_name = proto.Field( - proto.STRING, - number=12, - ) - - class OutputMetadata(proto.Message): - r"""Metadata of the prediction output to be explained. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - index_display_name_mapping (google.protobuf.struct_pb2.Value): - Static mapping between the index and display name. - - Use this if the outputs are a deterministic n-dimensional - array, e.g. a list of scores of all the classes in a - pre-defined order for a multi-classification Model. It's not - feasible if the outputs are non-deterministic, e.g. the - Model produces top-k classes or sort the outputs by their - values. - - The shape of the value must be an n-dimensional array of - strings. The number of dimensions must match that of the - outputs to be explained. The - [Attribution.output_display_name][google.cloud.aiplatform.v1.Attribution.output_display_name] - is populated by locating in the mapping with - [Attribution.output_index][google.cloud.aiplatform.v1.Attribution.output_index]. - - This field is a member of `oneof`_ ``display_name_mapping``. - display_name_mapping_key (str): - Specify a field name in the prediction to look for the - display name. - - Use this if the prediction contains the display names for - the outputs. 
- - The display names in the prediction must have the same shape - of the outputs, so that it can be located by - [Attribution.output_index][google.cloud.aiplatform.v1.Attribution.output_index] - for a specific output. - - This field is a member of `oneof`_ ``display_name_mapping``. - output_tensor_name (str): - Name of the output tensor. Required and is - only applicable to Vertex AI provided images for - Tensorflow. - """ - - index_display_name_mapping = proto.Field( - proto.MESSAGE, - number=1, - oneof='display_name_mapping', - message=struct_pb2.Value, - ) - display_name_mapping_key = proto.Field( - proto.STRING, - number=2, - oneof='display_name_mapping', - ) - output_tensor_name = proto.Field( - proto.STRING, - number=3, - ) - - inputs = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=1, - message=InputMetadata, - ) - outputs = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=2, - message=OutputMetadata, - ) - feature_attributions_schema_uri = proto.Field( - proto.STRING, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/feature.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/feature.py deleted file mode 100644 index 3bf46e4c2a..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/feature.py +++ /dev/null @@ -1,120 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'Feature', - }, -) - - -class Feature(proto.Message): - r"""Feature Metadata information that describes an attribute of - an entity type. For example, apple is an entity type, and color - is a feature that describes apple. - - Attributes: - name (str): - Immutable. Name of the Feature. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` - - The last part feature is assigned by the client. The feature - can be up to 64 characters long and can consist only of - ASCII Latin letters A-Z and a-z, underscore(_), and ASCII - digits 0-9 starting with a letter. The value will be unique - given an entity type. - description (str): - Description of the Feature. - value_type (google.cloud.aiplatform_v1.types.Feature.ValueType): - Required. Immutable. Type of Feature value. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this EntityType - was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this EntityType - was most recently updated. - labels (Sequence[google.cloud.aiplatform_v1.types.Feature.LabelsEntry]): - Optional. The labels with user-defined - metadata to organize your Features. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - on and examples of labels. No more than 64 user - labels can be associated with one Feature - (System labels are excluded)." - System reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. - etag (str): - Used to perform a consistent read-modify- - rite updates. 
If not set, a blind "overwrite" - update happens. - """ - class ValueType(proto.Enum): - r"""An enum representing the value type of a feature.""" - VALUE_TYPE_UNSPECIFIED = 0 - BOOL = 1 - BOOL_ARRAY = 2 - DOUBLE = 3 - DOUBLE_ARRAY = 4 - INT64 = 9 - INT64_ARRAY = 10 - STRING = 11 - STRING_ARRAY = 12 - BYTES = 13 - - name = proto.Field( - proto.STRING, - number=1, - ) - description = proto.Field( - proto.STRING, - number=2, - ) - value_type = proto.Field( - proto.ENUM, - number=3, - enum=ValueType, - ) - create_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=6, - ) - etag = proto.Field( - proto.STRING, - number=7, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/feature_monitoring_stats.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/feature_monitoring_stats.py deleted file mode 100644 index b4a44edbb6..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/feature_monitoring_stats.py +++ /dev/null @@ -1,124 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'FeatureStatsAnomaly', - }, -) - - -class FeatureStatsAnomaly(proto.Message): - r"""Stats and Anomaly generated at specific timestamp for specific - Feature. The start_time and end_time are used to define the time - range of the dataset that current stats belongs to, e.g. prediction - traffic is bucketed into prediction datasets by time window. If the - Dataset is not defined by time window, start_time = end_time. - Timestamp of the stats and anomalies always refers to end_time. Raw - stats and anomalies are stored in stats_uri or anomaly_uri in the - tensorflow defined protos. Field data_stats contains almost - identical information with the raw stats in Vertex AI defined proto, - for UI to display. - - Attributes: - score (float): - Feature importance score, only populated when cross-feature - monitoring is enabled. For now only used to represent - feature attribution score within range [0, 1] for - [ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_SKEW][google.cloud.aiplatform.v1.ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_SKEW] - and - [ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_DRIFT][google.cloud.aiplatform.v1.ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_DRIFT]. - stats_uri (str): - Path of the stats file for current feature values in Cloud - Storage bucket. Format: - gs:////stats. Example: - gs://monitoring_bucket/feature_name/stats. Stats are stored - as binary format with Protobuf message - `tensorflow.metadata.v0.FeatureNameStatistics `__. - anomaly_uri (str): - Path of the anomaly file for current feature values in Cloud - Storage bucket. Format: - gs:////anomalies. Example: - gs://monitoring_bucket/feature_name/anomalies. 
Stats are - stored as binary format with Protobuf message Anoamlies are - stored as binary format with Protobuf message - [tensorflow.metadata.v0.AnomalyInfo] - (https://github.com/tensorflow/metadata/blob/master/tensorflow_metadata/proto/v0/anomalies.proto). - distribution_deviation (float): - Deviation from the current stats to baseline - stats. 1. For categorical feature, the - distribution distance is calculated by - L-inifinity norm. - 2. For numerical feature, the distribution - distance is calculated by Jensen–Shannon - divergence. - anomaly_detection_threshold (float): - This is the threshold used when detecting anomalies. The - threshold can be changed by user, so this one might be - different from - [ThresholdConfig.value][google.cloud.aiplatform.v1.ThresholdConfig.value]. - start_time (google.protobuf.timestamp_pb2.Timestamp): - The start timestamp of window where stats were generated. - For objectives where time window doesn't make sense (e.g. - Featurestore Snapshot Monitoring), start_time is only used - to indicate the monitoring intervals, so it always equals to - (end_time - monitoring_interval). - end_time (google.protobuf.timestamp_pb2.Timestamp): - The end timestamp of window where stats were generated. For - objectives where time window doesn't make sense (e.g. - Featurestore Snapshot Monitoring), end_time indicates the - timestamp of the data used to generate stats (e.g. timestamp - we take snapshots for feature values). 
- """ - - score = proto.Field( - proto.DOUBLE, - number=1, - ) - stats_uri = proto.Field( - proto.STRING, - number=3, - ) - anomaly_uri = proto.Field( - proto.STRING, - number=4, - ) - distribution_deviation = proto.Field( - proto.DOUBLE, - number=5, - ) - anomaly_detection_threshold = proto.Field( - proto.DOUBLE, - number=9, - ) - start_time = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=8, - message=timestamp_pb2.Timestamp, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/feature_selector.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/feature_selector.py deleted file mode 100644 index 385902a8f6..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/feature_selector.py +++ /dev/null @@ -1,62 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'IdMatcher', - 'FeatureSelector', - }, -) - - -class IdMatcher(proto.Message): - r"""Matcher for Features of an EntityType by Feature ID. - - Attributes: - ids (Sequence[str]): - Required. 
The following are accepted as ``ids``: - - - A single-element list containing only ``*``, which - selects all Features in the target EntityType, or - - A list containing only Feature IDs, which selects only - Features with those IDs in the target EntityType. - """ - - ids = proto.RepeatedField( - proto.STRING, - number=1, - ) - - -class FeatureSelector(proto.Message): - r"""Selector for Features of an EntityType. - - Attributes: - id_matcher (google.cloud.aiplatform_v1.types.IdMatcher): - Required. Matches Features based on ID. - """ - - id_matcher = proto.Field( - proto.MESSAGE, - number=1, - message='IdMatcher', - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore.py deleted file mode 100644 index 77279cc842..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore.py +++ /dev/null @@ -1,137 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'Featurestore', - }, -) - - -class Featurestore(proto.Message): - r"""Vertex AI Feature Store provides a centralized repository for - organizing, storing, and serving ML features. The Featurestore - is a top-level container for your features and their values. - - Attributes: - name (str): - Output only. Name of the Featurestore. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Featurestore - was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Featurestore - was last updated. - etag (str): - Optional. Used to perform consistent read- - odify-write updates. If not set, a blind - "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1.types.Featurestore.LabelsEntry]): - Optional. The labels with user-defined - metadata to organize your Featurestore. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - on and examples of labels. No more than 64 user - labels can be associated with one - Featurestore(System labels are excluded)." - System reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. - online_serving_config (google.cloud.aiplatform_v1.types.Featurestore.OnlineServingConfig): - Required. Config for online serving - resources. - state (google.cloud.aiplatform_v1.types.Featurestore.State): - Output only. State of the featurestore. 
- encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): - Optional. Customer-managed encryption key - spec for data storage. If set, both of the - online and offline data storage will be secured - by this key. - """ - class State(proto.Enum): - r"""Possible states a Featurestore can have.""" - STATE_UNSPECIFIED = 0 - STABLE = 1 - UPDATING = 2 - - class OnlineServingConfig(proto.Message): - r"""OnlineServingConfig specifies the details for provisioning - online serving resources. - - Attributes: - fixed_node_count (int): - The number of nodes for each cluster. The - number of nodes will not scale automatically but - can be scaled manually by providing different - values when updating. - """ - - fixed_node_count = proto.Field( - proto.INT32, - number=2, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - create_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - etag = proto.Field( - proto.STRING, - number=5, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=6, - ) - online_serving_config = proto.Field( - proto.MESSAGE, - number=7, - message=OnlineServingConfig, - ) - state = proto.Field( - proto.ENUM, - number=8, - enum=State, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=10, - message=gca_encryption_spec.EncryptionSpec, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore_online_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore_online_service.py deleted file mode 100644 index dc0b3d9e53..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore_online_service.py +++ /dev/null @@ -1,382 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this 
file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import feature_selector as gca_feature_selector -from google.cloud.aiplatform_v1.types import types -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'ReadFeatureValuesRequest', - 'ReadFeatureValuesResponse', - 'StreamingReadFeatureValuesRequest', - 'FeatureValue', - 'FeatureValueList', - }, -) - - -class ReadFeatureValuesRequest(proto.Message): - r"""Request message for - [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues]. - - Attributes: - entity_type (str): - Required. The resource name of the EntityType for the entity - being read. Value format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. - For example, for a machine learning model predicting user - clicks on a website, an EntityType ID could be ``user``. - entity_id (str): - Required. ID for a specific entity. For example, for a - machine learning model predicting user clicks on a website, - an entity ID could be ``user_123``. - feature_selector (google.cloud.aiplatform_v1.types.FeatureSelector): - Required. Selector choosing Features of the - target EntityType. 
- """ - - entity_type = proto.Field( - proto.STRING, - number=1, - ) - entity_id = proto.Field( - proto.STRING, - number=2, - ) - feature_selector = proto.Field( - proto.MESSAGE, - number=3, - message=gca_feature_selector.FeatureSelector, - ) - - -class ReadFeatureValuesResponse(proto.Message): - r"""Response message for - [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreOnlineServingService.ReadFeatureValues]. - - Attributes: - header (google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse.Header): - Response header. - entity_view (google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse.EntityView): - Entity view with Feature values. This may be - the entity in the Featurestore if values for all - Features were requested, or a projection of the - entity in the Featurestore if values for only - some Features were requested. - """ - - class FeatureDescriptor(proto.Message): - r"""Metadata for requested Features. - - Attributes: - id (str): - Feature ID. - """ - - id = proto.Field( - proto.STRING, - number=1, - ) - - class Header(proto.Message): - r"""Response header with metadata for the requested - [ReadFeatureValuesRequest.entity_type][google.cloud.aiplatform.v1.ReadFeatureValuesRequest.entity_type] - and Features. - - Attributes: - entity_type (str): - The resource name of the EntityType from the - [ReadFeatureValuesRequest][google.cloud.aiplatform.v1.ReadFeatureValuesRequest]. - Value format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. - feature_descriptors (Sequence[google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse.FeatureDescriptor]): - List of Feature metadata corresponding to each piece of - [ReadFeatureValuesResponse.data][]. 
- """ - - entity_type = proto.Field( - proto.STRING, - number=1, - ) - feature_descriptors = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='ReadFeatureValuesResponse.FeatureDescriptor', - ) - - class EntityView(proto.Message): - r"""Entity view with Feature values. - - Attributes: - entity_id (str): - ID of the requested entity. - data (Sequence[google.cloud.aiplatform_v1.types.ReadFeatureValuesResponse.EntityView.Data]): - Each piece of data holds the k requested values for one - requested Feature. If no values for the requested Feature - exist, the corresponding cell will be empty. This has the - same size and is in the same order as the features from the - header - [ReadFeatureValuesResponse.header][google.cloud.aiplatform.v1.ReadFeatureValuesResponse.header]. - """ - - class Data(proto.Message): - r"""Container to hold value(s), successive in time, for one - Feature from the request. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - value (google.cloud.aiplatform_v1.types.FeatureValue): - Feature value if a single value is requested. - - This field is a member of `oneof`_ ``data``. - values (google.cloud.aiplatform_v1.types.FeatureValueList): - Feature values list if values, successive in - time, are requested. If the requested number of - values is greater than the number of existing - Feature values, nonexistent values are omitted - instead of being returned as empty. - - This field is a member of `oneof`_ ``data``. 
- """ - - value = proto.Field( - proto.MESSAGE, - number=1, - oneof='data', - message='FeatureValue', - ) - values = proto.Field( - proto.MESSAGE, - number=2, - oneof='data', - message='FeatureValueList', - ) - - entity_id = proto.Field( - proto.STRING, - number=1, - ) - data = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='ReadFeatureValuesResponse.EntityView.Data', - ) - - header = proto.Field( - proto.MESSAGE, - number=1, - message=Header, - ) - entity_view = proto.Field( - proto.MESSAGE, - number=2, - message=EntityView, - ) - - -class StreamingReadFeatureValuesRequest(proto.Message): - r"""Request message for - [FeaturestoreOnlineServingService.StreamingFeatureValuesRead][]. - - Attributes: - entity_type (str): - Required. The resource name of the entities' type. Value - format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. - For example, for a machine learning model predicting user - clicks on a website, an EntityType ID could be ``user``. - entity_ids (Sequence[str]): - Required. IDs of entities to read Feature values of. The - maximum number of IDs is 100. For example, for a machine - learning model predicting user clicks on a website, an - entity ID could be ``user_123``. - feature_selector (google.cloud.aiplatform_v1.types.FeatureSelector): - Required. Selector choosing Features of the - target EntityType. Feature IDs will be - deduplicated. - """ - - entity_type = proto.Field( - proto.STRING, - number=1, - ) - entity_ids = proto.RepeatedField( - proto.STRING, - number=2, - ) - feature_selector = proto.Field( - proto.MESSAGE, - number=3, - message=gca_feature_selector.FeatureSelector, - ) - - -class FeatureValue(proto.Message): - r"""Value for a feature. - NEXT ID: 15 - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - bool_value (bool): - Bool type feature value. - - This field is a member of `oneof`_ ``value``. - double_value (float): - Double type feature value. - - This field is a member of `oneof`_ ``value``. - int64_value (int): - Int64 feature value. - - This field is a member of `oneof`_ ``value``. - string_value (str): - String feature value. - - This field is a member of `oneof`_ ``value``. - bool_array_value (google.cloud.aiplatform_v1.types.BoolArray): - A list of bool type feature value. - - This field is a member of `oneof`_ ``value``. - double_array_value (google.cloud.aiplatform_v1.types.DoubleArray): - A list of double type feature value. - - This field is a member of `oneof`_ ``value``. - int64_array_value (google.cloud.aiplatform_v1.types.Int64Array): - A list of int64 type feature value. - - This field is a member of `oneof`_ ``value``. - string_array_value (google.cloud.aiplatform_v1.types.StringArray): - A list of string type feature value. - - This field is a member of `oneof`_ ``value``. - bytes_value (bytes): - Bytes feature value. - - This field is a member of `oneof`_ ``value``. - metadata (google.cloud.aiplatform_v1.types.FeatureValue.Metadata): - Metadata of feature value. - """ - - class Metadata(proto.Message): - r"""Metadata of feature value. - - Attributes: - generate_time (google.protobuf.timestamp_pb2.Timestamp): - Feature generation timestamp. Typically, it - is provided by user at feature ingestion time. - If not, feature store will use the system - timestamp when the data is ingested into feature - store. For streaming ingestion, the time, - aligned by days, must be no older than five - years (1825 days) and no later than one year - (366 days) in the future. 
- """ - - generate_time = proto.Field( - proto.MESSAGE, - number=1, - message=timestamp_pb2.Timestamp, - ) - - bool_value = proto.Field( - proto.BOOL, - number=1, - oneof='value', - ) - double_value = proto.Field( - proto.DOUBLE, - number=2, - oneof='value', - ) - int64_value = proto.Field( - proto.INT64, - number=5, - oneof='value', - ) - string_value = proto.Field( - proto.STRING, - number=6, - oneof='value', - ) - bool_array_value = proto.Field( - proto.MESSAGE, - number=7, - oneof='value', - message=types.BoolArray, - ) - double_array_value = proto.Field( - proto.MESSAGE, - number=8, - oneof='value', - message=types.DoubleArray, - ) - int64_array_value = proto.Field( - proto.MESSAGE, - number=11, - oneof='value', - message=types.Int64Array, - ) - string_array_value = proto.Field( - proto.MESSAGE, - number=12, - oneof='value', - message=types.StringArray, - ) - bytes_value = proto.Field( - proto.BYTES, - number=13, - oneof='value', - ) - metadata = proto.Field( - proto.MESSAGE, - number=14, - message=Metadata, - ) - - -class FeatureValueList(proto.Message): - r"""Container for list of values. - - Attributes: - values (Sequence[google.cloud.aiplatform_v1.types.FeatureValue]): - A list of feature values. All of them should - be the same data type. - """ - - values = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='FeatureValue', - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore_service.py deleted file mode 100644 index 81538e3878..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/featurestore_service.py +++ /dev/null @@ -1,1653 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import entity_type as gca_entity_type -from google.cloud.aiplatform_v1.types import feature as gca_feature -from google.cloud.aiplatform_v1.types import feature_selector as gca_feature_selector -from google.cloud.aiplatform_v1.types import featurestore as gca_featurestore -from google.cloud.aiplatform_v1.types import io -from google.cloud.aiplatform_v1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'CreateFeaturestoreRequest', - 'GetFeaturestoreRequest', - 'ListFeaturestoresRequest', - 'ListFeaturestoresResponse', - 'UpdateFeaturestoreRequest', - 'DeleteFeaturestoreRequest', - 'ImportFeatureValuesRequest', - 'ImportFeatureValuesResponse', - 'BatchReadFeatureValuesRequest', - 'ExportFeatureValuesRequest', - 'DestinationFeatureSetting', - 'FeatureValueDestination', - 'ExportFeatureValuesResponse', - 'BatchReadFeatureValuesResponse', - 'CreateEntityTypeRequest', - 'GetEntityTypeRequest', - 'ListEntityTypesRequest', - 'ListEntityTypesResponse', - 'UpdateEntityTypeRequest', - 'DeleteEntityTypeRequest', - 'CreateFeatureRequest', - 'BatchCreateFeaturesRequest', - 'BatchCreateFeaturesResponse', - 'GetFeatureRequest', - 'ListFeaturesRequest', - 'ListFeaturesResponse', - 'SearchFeaturesRequest', - 'SearchFeaturesResponse', - 'UpdateFeatureRequest', - 'DeleteFeatureRequest', - 'CreateFeaturestoreOperationMetadata', - 
'UpdateFeaturestoreOperationMetadata', - 'ImportFeatureValuesOperationMetadata', - 'ExportFeatureValuesOperationMetadata', - 'BatchReadFeatureValuesOperationMetadata', - 'CreateEntityTypeOperationMetadata', - 'CreateFeatureOperationMetadata', - 'BatchCreateFeaturesOperationMetadata', - }, -) - - -class CreateFeaturestoreRequest(proto.Message): - r"""Request message for - [FeaturestoreService.CreateFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.CreateFeaturestore]. - - Attributes: - parent (str): - Required. The resource name of the Location to create - Featurestores. Format: - ``projects/{project}/locations/{location}'`` - featurestore (google.cloud.aiplatform_v1.types.Featurestore): - Required. The Featurestore to create. - featurestore_id (str): - Required. The ID to use for this Featurestore, which will - become the final component of the Featurestore's resource - name. - - This value may be up to 60 characters, and valid characters - are ``[a-z0-9_]``. The first character cannot be a number. - - The value must be unique within the project and location. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - featurestore = proto.Field( - proto.MESSAGE, - number=2, - message=gca_featurestore.Featurestore, - ) - featurestore_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetFeaturestoreRequest(proto.Message): - r"""Request message for - [FeaturestoreService.GetFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.GetFeaturestore]. - - Attributes: - name (str): - Required. The name of the Featurestore - resource. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListFeaturestoresRequest(proto.Message): - r"""Request message for - [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores]. - - Attributes: - parent (str): - Required. The resource name of the Location to list - Featurestores. 
Format: - ``projects/{project}/locations/{location}`` - filter (str): - Lists the featurestores that match the filter expression. - The following fields are supported: - - - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``online_serving_config.fixed_node_count``: Supports - ``=``, ``!=``, ``<``, ``>``, ``<=``, and ``>=`` - comparisons. - - ``labels``: Supports key-value equality and key presence. - - Examples: - - - ``create_time > "2020-01-01" OR update_time > "2020-01-01"`` - Featurestores created or updated after 2020-01-01. - - ``labels.env = "prod"`` Featurestores with label "env" - set to "prod". - page_size (int): - The maximum number of Featurestores to - return. The service may return fewer than this - value. If unspecified, at most 100 Featurestores - will be returned. The maximum value is 100; any - value greater than 100 will be coerced to 100. - page_token (str): - A page token, received from a previous - [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores] - call. Provide this to retrieve the subsequent page. - - When paginating, all other parameters provided to - [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores] - must match the call that provided the page token. - order_by (str): - A comma-separated list of fields to order by, sorted in - ascending order. Use "desc" after a field name for - descending. Supported Fields: - - - ``create_time`` - - ``update_time`` - - ``online_serving_config.fixed_node_count`` - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - order_by = proto.Field( - proto.STRING, - number=5, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=6, - message=field_mask_pb2.FieldMask, - ) - - -class ListFeaturestoresResponse(proto.Message): - r"""Response message for - [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1.FeaturestoreService.ListFeaturestores]. - - Attributes: - featurestores (Sequence[google.cloud.aiplatform_v1.types.Featurestore]): - The Featurestores matching the request. - next_page_token (str): - A token, which can be sent as - [ListFeaturestoresRequest.page_token][google.cloud.aiplatform.v1.ListFeaturestoresRequest.page_token] - to retrieve the next page. If this field is omitted, there - are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - featurestores = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_featurestore.Featurestore, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateFeaturestoreRequest(proto.Message): - r"""Request message for - [FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeaturestore]. - - Attributes: - featurestore (google.cloud.aiplatform_v1.types.Featurestore): - Required. The Featurestore's ``name`` field is used to - identify the Featurestore to be updated. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Field mask is used to specify the fields to be overwritten - in the Featurestore resource by the update. The fields - specified in the update_mask are relative to the resource, - not the full request. A field will be overwritten if it is - in the mask. 
If the user does not provide a mask then only - the non-empty fields present in the request will be - overwritten. Set the update_mask to ``*`` to override all - fields. - - Updatable fields: - - - ``labels`` - - ``online_serving_config.fixed_node_count`` - """ - - featurestore = proto.Field( - proto.MESSAGE, - number=1, - message=gca_featurestore.Featurestore, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeleteFeaturestoreRequest(proto.Message): - r"""Request message for - [FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeaturestore]. - - Attributes: - name (str): - Required. The name of the Featurestore to be deleted. - Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - force (bool): - If set to true, any EntityTypes and Features - for this Featurestore will also be deleted. - (Otherwise, the request will only work if the - Featurestore has no EntityTypes.) - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - force = proto.Field( - proto.BOOL, - number=2, - ) - - -class ImportFeatureValuesRequest(proto.Message): - r"""Request message for - [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ImportFeatureValues]. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - avro_source (google.cloud.aiplatform_v1.types.AvroSource): - - This field is a member of `oneof`_ ``source``. - bigquery_source (google.cloud.aiplatform_v1.types.BigQuerySource): - - This field is a member of `oneof`_ ``source``. 
- csv_source (google.cloud.aiplatform_v1.types.CsvSource): - - This field is a member of `oneof`_ ``source``. - feature_time_field (str): - Source column that holds the Feature - timestamp for all Feature values in each entity. - - This field is a member of `oneof`_ ``feature_time_source``. - feature_time (google.protobuf.timestamp_pb2.Timestamp): - Single Feature timestamp for all entities - being imported. The timestamp must not have - higher than millisecond precision. - - This field is a member of `oneof`_ ``feature_time_source``. - entity_type (str): - Required. The resource name of the EntityType grouping the - Features for which values are being imported. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`` - entity_id_field (str): - Source column that holds entity IDs. If not provided, entity - IDs are extracted from the column named ``entity_id``. - feature_specs (Sequence[google.cloud.aiplatform_v1.types.ImportFeatureValuesRequest.FeatureSpec]): - Required. Specifications defining which Feature values to - import from the entity. The request fails if no - feature_specs are provided, and having multiple - feature_specs for one Feature is not allowed. - disable_online_serving (bool): - If set, data will not be imported for online - serving. This is typically used for backfilling, - where Feature generation timestamps are not in - the timestamp range needed for online serving. - worker_count (int): - Specifies the number of workers that are used - to write data to the Featurestore. Consider the - online serving capacity that you require to - achieve the desired import throughput without - interfering with online serving. The value must - be positive, and less than or equal to 100. If - not set, defaults to using 1 worker. The low - count ensures minimal impact on online serving - performance. - """ - - class FeatureSpec(proto.Message): - r"""Defines the Feature value(s) to import. 
- - Attributes: - id (str): - Required. ID of the Feature to import values - of. This Feature must exist in the target - EntityType, or the request will fail. - source_field (str): - Source column to get the Feature values from. - If not set, uses the column with the same name - as the Feature ID. - """ - - id = proto.Field( - proto.STRING, - number=1, - ) - source_field = proto.Field( - proto.STRING, - number=2, - ) - - avro_source = proto.Field( - proto.MESSAGE, - number=2, - oneof='source', - message=io.AvroSource, - ) - bigquery_source = proto.Field( - proto.MESSAGE, - number=3, - oneof='source', - message=io.BigQuerySource, - ) - csv_source = proto.Field( - proto.MESSAGE, - number=4, - oneof='source', - message=io.CsvSource, - ) - feature_time_field = proto.Field( - proto.STRING, - number=6, - oneof='feature_time_source', - ) - feature_time = proto.Field( - proto.MESSAGE, - number=7, - oneof='feature_time_source', - message=timestamp_pb2.Timestamp, - ) - entity_type = proto.Field( - proto.STRING, - number=1, - ) - entity_id_field = proto.Field( - proto.STRING, - number=5, - ) - feature_specs = proto.RepeatedField( - proto.MESSAGE, - number=8, - message=FeatureSpec, - ) - disable_online_serving = proto.Field( - proto.BOOL, - number=9, - ) - worker_count = proto.Field( - proto.INT32, - number=11, - ) - - -class ImportFeatureValuesResponse(proto.Message): - r"""Response message for - [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ImportFeatureValues]. - - Attributes: - imported_entity_count (int): - Number of entities that have been imported by - the operation. - imported_feature_value_count (int): - Number of Feature values that have been - imported by the operation. - invalid_row_count (int): - The number of rows in input source that weren't imported due - to either - - - Not having any featureValues. - - Having a null entityId. - - Having a null timestamp. - - Not being parsable (applicable for CSV sources). 
- """ - - imported_entity_count = proto.Field( - proto.INT64, - number=1, - ) - imported_feature_value_count = proto.Field( - proto.INT64, - number=2, - ) - invalid_row_count = proto.Field( - proto.INT64, - number=6, - ) - - -class BatchReadFeatureValuesRequest(proto.Message): - r"""Request message for - [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.BatchReadFeatureValues]. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - csv_read_instances (google.cloud.aiplatform_v1.types.CsvSource): - Each read instance consists of exactly one read timestamp - and one or more entity IDs identifying entities of the - corresponding EntityTypes whose Features are requested. - - Each output instance contains Feature values of requested - entities concatenated together as of the read time. - - An example read instance may be - ``foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z``. - - An example output instance may be - ``foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z, foo_entity_feature1_value, bar_entity_feature2_value``. - - Timestamp in each read instance must be millisecond-aligned. - - ``csv_read_instances`` are read instances stored in a - plain-text CSV file. The header should be: - [ENTITY_TYPE_ID1], [ENTITY_TYPE_ID2], ..., timestamp - - The columns can be in any order. - - Values in the timestamp column must use the RFC 3339 format, - e.g. ``2012-07-30T10:43:17.123Z``. - - This field is a member of `oneof`_ ``read_option``. - bigquery_read_instances (google.cloud.aiplatform_v1.types.BigQuerySource): - Similar to csv_read_instances, but from BigQuery source. - - This field is a member of `oneof`_ ``read_option``. 
- featurestore (str): - Required. The resource name of the Featurestore from which - to query Feature values. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - destination (google.cloud.aiplatform_v1.types.FeatureValueDestination): - Required. Specifies output location and - format. - pass_through_fields (Sequence[google.cloud.aiplatform_v1.types.BatchReadFeatureValuesRequest.PassThroughField]): - When not empty, the specified fields in the - \*_read_instances source will be joined as-is in the output, - in addition to those fields from the Featurestore Entity. - - For BigQuery source, the type of the pass-through values - will be automatically inferred. For CSV source, the - pass-through values will be passed as opaque bytes. - entity_type_specs (Sequence[google.cloud.aiplatform_v1.types.BatchReadFeatureValuesRequest.EntityTypeSpec]): - Required. Specifies EntityType grouping Features to read - values of and settings. Each EntityType referenced in - [BatchReadFeatureValuesRequest.entity_type_specs] must have - a column specifying entity IDs in the EntityType in - [BatchReadFeatureValuesRequest.request][] . - """ - - class PassThroughField(proto.Message): - r"""Describe pass-through fields in read_instance source. - - Attributes: - field_name (str): - Required. The name of the field in the CSV header or the - name of the column in BigQuery table. The naming restriction - is the same as - [Feature.name][google.cloud.aiplatform.v1.Feature.name]. - """ - - field_name = proto.Field( - proto.STRING, - number=1, - ) - - class EntityTypeSpec(proto.Message): - r"""Selects Features of an EntityType to read values of and - specifies read settings. - - Attributes: - entity_type_id (str): - Required. ID of the EntityType to select Features. The - EntityType id is the - [entity_type_id][google.cloud.aiplatform.v1.CreateEntityTypeRequest.entity_type_id] - specified during EntityType creation. 
- feature_selector (google.cloud.aiplatform_v1.types.FeatureSelector): - Required. Selectors choosing which Feature - values to read from the EntityType. - settings (Sequence[google.cloud.aiplatform_v1.types.DestinationFeatureSetting]): - Per-Feature settings for the batch read. - """ - - entity_type_id = proto.Field( - proto.STRING, - number=1, - ) - feature_selector = proto.Field( - proto.MESSAGE, - number=2, - message=gca_feature_selector.FeatureSelector, - ) - settings = proto.RepeatedField( - proto.MESSAGE, - number=3, - message='DestinationFeatureSetting', - ) - - csv_read_instances = proto.Field( - proto.MESSAGE, - number=3, - oneof='read_option', - message=io.CsvSource, - ) - bigquery_read_instances = proto.Field( - proto.MESSAGE, - number=5, - oneof='read_option', - message=io.BigQuerySource, - ) - featurestore = proto.Field( - proto.STRING, - number=1, - ) - destination = proto.Field( - proto.MESSAGE, - number=4, - message='FeatureValueDestination', - ) - pass_through_fields = proto.RepeatedField( - proto.MESSAGE, - number=8, - message=PassThroughField, - ) - entity_type_specs = proto.RepeatedField( - proto.MESSAGE, - number=7, - message=EntityTypeSpec, - ) - - -class ExportFeatureValuesRequest(proto.Message): - r"""Request message for - [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ExportFeatureValues]. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - snapshot_export (google.cloud.aiplatform_v1.types.ExportFeatureValuesRequest.SnapshotExport): - Exports the latest Feature values of all - entities of the EntityType within a time range. - - This field is a member of `oneof`_ ``mode``. 
- full_export (google.cloud.aiplatform_v1.types.ExportFeatureValuesRequest.FullExport): - Exports all historical values of all entities - of the EntityType within a time range - - This field is a member of `oneof`_ ``mode``. - entity_type (str): - Required. The resource name of the EntityType from which to - export Feature values. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - destination (google.cloud.aiplatform_v1.types.FeatureValueDestination): - Required. Specifies destination location and - format. - feature_selector (google.cloud.aiplatform_v1.types.FeatureSelector): - Required. Selects Features to export values - of. - settings (Sequence[google.cloud.aiplatform_v1.types.DestinationFeatureSetting]): - Per-Feature export settings. - """ - - class SnapshotExport(proto.Message): - r"""Describes exporting the latest Feature values of all entities of the - EntityType between [start_time, snapshot_time]. - - Attributes: - snapshot_time (google.protobuf.timestamp_pb2.Timestamp): - Exports Feature values as of this timestamp. - If not set, retrieve values as of now. - Timestamp, if present, must not have higher than - millisecond precision. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Excludes Feature values with feature - generation timestamp before this timestamp. If - not set, retrieve oldest values kept in Feature - Store. Timestamp, if present, must not have - higher than millisecond precision. - """ - - snapshot_time = proto.Field( - proto.MESSAGE, - number=1, - message=timestamp_pb2.Timestamp, - ) - start_time = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - - class FullExport(proto.Message): - r"""Describes exporting all historical Feature values of all entities of - the EntityType between [start_time, end_time]. 
- - Attributes: - start_time (google.protobuf.timestamp_pb2.Timestamp): - Excludes Feature values with feature - generation timestamp before this timestamp. If - not set, retrieve oldest values kept in Feature - Store. Timestamp, if present, must not have - higher than millisecond precision. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Exports Feature values as of this timestamp. - If not set, retrieve values as of now. - Timestamp, if present, must not have higher than - millisecond precision. - """ - - start_time = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=1, - message=timestamp_pb2.Timestamp, - ) - - snapshot_export = proto.Field( - proto.MESSAGE, - number=3, - oneof='mode', - message=SnapshotExport, - ) - full_export = proto.Field( - proto.MESSAGE, - number=7, - oneof='mode', - message=FullExport, - ) - entity_type = proto.Field( - proto.STRING, - number=1, - ) - destination = proto.Field( - proto.MESSAGE, - number=4, - message='FeatureValueDestination', - ) - feature_selector = proto.Field( - proto.MESSAGE, - number=5, - message=gca_feature_selector.FeatureSelector, - ) - settings = proto.RepeatedField( - proto.MESSAGE, - number=6, - message='DestinationFeatureSetting', - ) - - -class DestinationFeatureSetting(proto.Message): - r""" - - Attributes: - feature_id (str): - Required. The ID of the Feature to apply the - setting to. - destination_field (str): - Specify the field name in the export - destination. If not specified, Feature ID is - used. - """ - - feature_id = proto.Field( - proto.STRING, - number=1, - ) - destination_field = proto.Field( - proto.STRING, - number=2, - ) - - -class FeatureValueDestination(proto.Message): - r"""A destination location for Feature values and format. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. 
- Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - bigquery_destination (google.cloud.aiplatform_v1.types.BigQueryDestination): - Output in BigQuery format. - [BigQueryDestination.output_uri][google.cloud.aiplatform.v1.BigQueryDestination.output_uri] - in - [FeatureValueDestination.bigquery_destination][google.cloud.aiplatform.v1.FeatureValueDestination.bigquery_destination] - must refer to a table. - - This field is a member of `oneof`_ ``destination``. - tfrecord_destination (google.cloud.aiplatform_v1.types.TFRecordDestination): - Output in TFRecord format. - - Below are the mapping from Feature value type in - Featurestore to Feature value type in TFRecord: - - :: - - Value type in Featurestore | Value type in TFRecord - DOUBLE, DOUBLE_ARRAY | FLOAT_LIST - INT64, INT64_ARRAY | INT64_LIST - STRING, STRING_ARRAY, BYTES | BYTES_LIST - true -> byte_string("true"), false -> byte_string("false") - BOOL, BOOL_ARRAY (true, false) | BYTES_LIST - - This field is a member of `oneof`_ ``destination``. - csv_destination (google.cloud.aiplatform_v1.types.CsvDestination): - Output in CSV format. Array Feature value - types are not allowed in CSV format. - - This field is a member of `oneof`_ ``destination``. - """ - - bigquery_destination = proto.Field( - proto.MESSAGE, - number=1, - oneof='destination', - message=io.BigQueryDestination, - ) - tfrecord_destination = proto.Field( - proto.MESSAGE, - number=2, - oneof='destination', - message=io.TFRecordDestination, - ) - csv_destination = proto.Field( - proto.MESSAGE, - number=3, - oneof='destination', - message=io.CsvDestination, - ) - - -class ExportFeatureValuesResponse(proto.Message): - r"""Response message for - [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.ExportFeatureValues]. 
- - """ - - -class BatchReadFeatureValuesResponse(proto.Message): - r"""Response message for - [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1.FeaturestoreService.BatchReadFeatureValues]. - - """ - - -class CreateEntityTypeRequest(proto.Message): - r"""Request message for - [FeaturestoreService.CreateEntityType][google.cloud.aiplatform.v1.FeaturestoreService.CreateEntityType]. - - Attributes: - parent (str): - Required. The resource name of the Featurestore to create - EntityTypes. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - entity_type (google.cloud.aiplatform_v1.types.EntityType): - The EntityType to create. - entity_type_id (str): - Required. The ID to use for the EntityType, which will - become the final component of the EntityType's resource - name. - - This value may be up to 60 characters, and valid characters - are ``[a-z0-9_]``. The first character cannot be a number. - - The value must be unique within a featurestore. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - entity_type = proto.Field( - proto.MESSAGE, - number=2, - message=gca_entity_type.EntityType, - ) - entity_type_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetEntityTypeRequest(proto.Message): - r"""Request message for - [FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1.FeaturestoreService.GetEntityType]. - - Attributes: - name (str): - Required. The name of the EntityType resource. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListEntityTypesRequest(proto.Message): - r"""Request message for - [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes]. - - Attributes: - parent (str): - Required. The resource name of the Featurestore to list - EntityTypes. 
Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - filter (str): - Lists the EntityTypes that match the filter expression. The - following filters are supported: - - - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``>=``, and ``<=`` comparisons. Values must be in RFC - 3339 format. - - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``>=``, and ``<=`` comparisons. Values must be in RFC - 3339 format. - - ``labels``: Supports key-value equality as well as key - presence. - - Examples: - - - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"`` - --> EntityTypes created or updated after - 2020-01-31T15:30:00.000000Z. - - ``labels.active = yes AND labels.env = prod`` --> - EntityTypes having both (active: yes) and (env: prod) - labels. - - ``labels.env: *`` --> Any EntityType which has a label - with 'env' as the key. - page_size (int): - The maximum number of EntityTypes to return. - The service may return fewer than this value. If - unspecified, at most 1000 EntityTypes will be - returned. The maximum value is 1000; any value - greater than 1000 will be coerced to 1000. - page_token (str): - A page token, received from a previous - [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes] - call. Provide this to retrieve the subsequent page. - - When paginating, all other parameters provided to - [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes] - must match the call that provided the page token. - order_by (str): - A comma-separated list of fields to order by, sorted in - ascending order. Use "desc" after a field name for - descending. - - Supported fields: - - - ``entity_type_id`` - - ``create_time`` - - ``update_time`` - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - order_by = proto.Field( - proto.STRING, - number=5, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=6, - message=field_mask_pb2.FieldMask, - ) - - -class ListEntityTypesResponse(proto.Message): - r"""Response message for - [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1.FeaturestoreService.ListEntityTypes]. - - Attributes: - entity_types (Sequence[google.cloud.aiplatform_v1.types.EntityType]): - The EntityTypes matching the request. - next_page_token (str): - A token, which can be sent as - [ListEntityTypesRequest.page_token][google.cloud.aiplatform.v1.ListEntityTypesRequest.page_token] - to retrieve the next page. If this field is omitted, there - are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - entity_types = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_entity_type.EntityType, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateEntityTypeRequest(proto.Message): - r"""Request message for - [FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1.FeaturestoreService.UpdateEntityType]. - - Attributes: - entity_type (google.cloud.aiplatform_v1.types.EntityType): - Required. The EntityType's ``name`` field is used to - identify the EntityType to be updated. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Field mask is used to specify the fields to be overwritten - in the EntityType resource by the update. The fields - specified in the update_mask are relative to the resource, - not the full request. A field will be overwritten if it is - in the mask. 
If the user does not provide a mask then only - the non-empty fields present in the request will be - overwritten. Set the update_mask to ``*`` to override all - fields. - - Updatable fields: - - - ``description`` - - ``labels`` - - ``monitoring_config.snapshot_analysis.disabled`` - - ``monitoring_config.snapshot_analysis.monitoring_interval`` - """ - - entity_type = proto.Field( - proto.MESSAGE, - number=1, - message=gca_entity_type.EntityType, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeleteEntityTypeRequest(proto.Message): - r"""Request message for [FeaturestoreService.DeleteEntityTypes][]. - - Attributes: - name (str): - Required. The name of the EntityType to be deleted. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - force (bool): - If set to true, any Features for this - EntityType will also be deleted. (Otherwise, the - request will only work if the EntityType has no - Features.) - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - force = proto.Field( - proto.BOOL, - number=2, - ) - - -class CreateFeatureRequest(proto.Message): - r"""Request message for - [FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1.FeaturestoreService.CreateFeature]. - - Attributes: - parent (str): - Required. The resource name of the EntityType to create a - Feature. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - feature (google.cloud.aiplatform_v1.types.Feature): - Required. The Feature to create. - feature_id (str): - Required. The ID to use for the Feature, which will become - the final component of the Feature's resource name. - - This value may be up to 60 characters, and valid characters - are ``[a-z0-9_]``. The first character cannot be a number. - - The value must be unique within an EntityType. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - feature = proto.Field( - proto.MESSAGE, - number=2, - message=gca_feature.Feature, - ) - feature_id = proto.Field( - proto.STRING, - number=3, - ) - - -class BatchCreateFeaturesRequest(proto.Message): - r"""Request message for - [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures]. - - Attributes: - parent (str): - Required. The resource name of the EntityType to create the - batch of Features under. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - requests (Sequence[google.cloud.aiplatform_v1.types.CreateFeatureRequest]): - Required. The request message specifying the Features to - create. All Features must be created under the same parent - EntityType. The ``parent`` field in each child request - message can be omitted. If ``parent`` is set in a child - request, then the value must match the ``parent`` value in - this request message. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - requests = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='CreateFeatureRequest', - ) - - -class BatchCreateFeaturesResponse(proto.Message): - r"""Response message for - [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1.FeaturestoreService.BatchCreateFeatures]. - - Attributes: - features (Sequence[google.cloud.aiplatform_v1.types.Feature]): - The Features created. - """ - - features = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_feature.Feature, - ) - - -class GetFeatureRequest(proto.Message): - r"""Request message for - [FeaturestoreService.GetFeature][google.cloud.aiplatform.v1.FeaturestoreService.GetFeature]. - - Attributes: - name (str): - Required. The name of the Feature resource. 
Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListFeaturesRequest(proto.Message): - r"""Request message for - [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures]. - - Attributes: - parent (str): - Required. The resource name of the Location to list - Features. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - filter (str): - Lists the Features that match the filter expression. The - following filters are supported: - - - ``value_type``: Supports = and != comparisons. - - ``create_time``: Supports =, !=, <, >, >=, and <= - comparisons. Values must be in RFC 3339 format. - - ``update_time``: Supports =, !=, <, >, >=, and <= - comparisons. Values must be in RFC 3339 format. - - ``labels``: Supports key-value equality as well as key - presence. - - Examples: - - - ``value_type = DOUBLE`` --> Features whose type is - DOUBLE. - - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"`` - --> EntityTypes created or updated after - 2020-01-31T15:30:00.000000Z. - - ``labels.active = yes AND labels.env = prod`` --> - Features having both (active: yes) and (env: prod) - labels. - - ``labels.env: *`` --> Any Feature which has a label with - 'env' as the key. - page_size (int): - The maximum number of Features to return. The - service may return fewer than this value. If - unspecified, at most 1000 Features will be - returned. The maximum value is 1000; any value - greater than 1000 will be coerced to 1000. - page_token (str): - A page token, received from a previous - [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures] - call. Provide this to retrieve the subsequent page. 
- - When paginating, all other parameters provided to - [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures] - must match the call that provided the page token. - order_by (str): - A comma-separated list of fields to order by, sorted in - ascending order. Use "desc" after a field name for - descending. Supported fields: - - - ``feature_id`` - - ``value_type`` - - ``create_time`` - - ``update_time`` - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - latest_stats_count (int): - If set, return the most recent - [ListFeaturesRequest.latest_stats_count][google.cloud.aiplatform.v1.ListFeaturesRequest.latest_stats_count] - of stats for each Feature in response. Valid value is [0, - 10]. If number of stats exists < - [ListFeaturesRequest.latest_stats_count][google.cloud.aiplatform.v1.ListFeaturesRequest.latest_stats_count], - return all existing stats. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - order_by = proto.Field( - proto.STRING, - number=5, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=6, - message=field_mask_pb2.FieldMask, - ) - latest_stats_count = proto.Field( - proto.INT32, - number=7, - ) - - -class ListFeaturesResponse(proto.Message): - r"""Response message for - [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1.FeaturestoreService.ListFeatures]. - - Attributes: - features (Sequence[google.cloud.aiplatform_v1.types.Feature]): - The Features matching the request. - next_page_token (str): - A token, which can be sent as - [ListFeaturesRequest.page_token][google.cloud.aiplatform.v1.ListFeaturesRequest.page_token] - to retrieve the next page. If this field is omitted, there - are no subsequent pages. 
- """ - - @property - def raw_page(self): - return self - - features = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_feature.Feature, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class SearchFeaturesRequest(proto.Message): - r"""Request message for - [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures]. - - Attributes: - location (str): - Required. The resource name of the Location to search - Features. Format: - ``projects/{project}/locations/{location}`` - query (str): - Query string that is a conjunction of field-restricted - queries and/or field-restricted filters. Field-restricted - queries and filters can be combined using ``AND`` to form a - conjunction. - - A field query is in the form FIELD:QUERY. This implicitly - checks if QUERY exists as a substring within Feature's - FIELD. The QUERY and the FIELD are converted to a sequence - of words (i.e. tokens) for comparison. This is done by: - - - Removing leading/trailing whitespace and tokenizing the - search value. Characters that are not one of alphanumeric - ``[a-zA-Z0-9]``, underscore ``_``, or asterisk ``*`` are - treated as delimiters for tokens. ``*`` is treated as a - wildcard that matches characters within a token. - - Ignoring case. - - Prepending an asterisk to the first and appending an - asterisk to the last token in QUERY. - - A QUERY must be either a singular token or a phrase. A - phrase is one or multiple words enclosed in double quotation - marks ("). With phrases, the order of the words is - important. Words in the phrase must be matching in order and - consecutively. - - Supported FIELDs for field-restricted queries: - - - ``feature_id`` - - ``description`` - - ``entity_type_id`` - - Examples: - - - ``feature_id: foo`` --> Matches a Feature with ID - containing the substring ``foo`` (eg. ``foo``, - ``foofeature``, ``barfoo``). 
- - ``feature_id: foo*feature`` --> Matches a Feature with ID - containing the substring ``foo*feature`` (eg. - ``foobarfeature``). - - ``feature_id: foo AND description: bar`` --> Matches a - Feature with ID containing the substring ``foo`` and - description containing the substring ``bar``. - - Besides field queries, the following exact-match filters are - supported. The exact-match filters do not support wildcards. - Unlike field-restricted queries, exact-match filters are - case-sensitive. - - - ``feature_id``: Supports = comparisons. - - ``description``: Supports = comparisons. Multi-token - filters should be enclosed in quotes. - - ``entity_type_id``: Supports = comparisons. - - ``value_type``: Supports = and != comparisons. - - ``labels``: Supports key-value equality as well as key - presence. - - ``featurestore_id``: Supports = comparisons. - - Examples: - - - ``description = "foo bar"`` --> Any Feature with - description exactly equal to ``foo bar`` - - ``value_type = DOUBLE`` --> Features whose type is - DOUBLE. - - ``labels.active = yes AND labels.env = prod`` --> - Features having both (active: yes) and (env: prod) - labels. - - ``labels.env: *`` --> Any Feature which has a label with - ``env`` as the key. - page_size (int): - The maximum number of Features to return. The - service may return fewer than this value. If - unspecified, at most 100 Features will be - returned. The maximum value is 100; any value - greater than 100 will be coerced to 100. - page_token (str): - A page token, received from a previous - [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures] - call. Provide this to retrieve the subsequent page. - - When paginating, all other parameters provided to - [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures], - except ``page_size``, must match the call that provided the - page token. 
- """ - - location = proto.Field( - proto.STRING, - number=1, - ) - query = proto.Field( - proto.STRING, - number=3, - ) - page_size = proto.Field( - proto.INT32, - number=4, - ) - page_token = proto.Field( - proto.STRING, - number=5, - ) - - -class SearchFeaturesResponse(proto.Message): - r"""Response message for - [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1.FeaturestoreService.SearchFeatures]. - - Attributes: - features (Sequence[google.cloud.aiplatform_v1.types.Feature]): - The Features matching the request. - - Fields returned: - - - ``name`` - - ``description`` - - ``labels`` - - ``create_time`` - - ``update_time`` - next_page_token (str): - A token, which can be sent as - [SearchFeaturesRequest.page_token][google.cloud.aiplatform.v1.SearchFeaturesRequest.page_token] - to retrieve the next page. If this field is omitted, there - are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - features = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_feature.Feature, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateFeatureRequest(proto.Message): - r"""Request message for - [FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1.FeaturestoreService.UpdateFeature]. - - Attributes: - feature (google.cloud.aiplatform_v1.types.Feature): - Required. The Feature's ``name`` field is used to identify - the Feature to be updated. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Field mask is used to specify the fields to be overwritten - in the Features resource by the update. The fields specified - in the update_mask are relative to the resource, not the - full request. A field will be overwritten if it is in the - mask. If the user does not provide a mask then only the - non-empty fields present in the request will be overwritten. 
- Set the update_mask to ``*`` to override all fields. - - Updatable fields: - - - ``description`` - - ``labels`` - - ``monitoring_config.snapshot_analysis.disabled`` - - ``monitoring_config.snapshot_analysis.monitoring_interval`` - """ - - feature = proto.Field( - proto.MESSAGE, - number=1, - message=gca_feature.Feature, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeleteFeatureRequest(proto.Message): - r"""Request message for - [FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1.FeaturestoreService.DeleteFeature]. - - Attributes: - name (str): - Required. The name of the Features to be deleted. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateFeaturestoreOperationMetadata(proto.Message): - r"""Details of operations that perform create Featurestore. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - Operation metadata for Featurestore. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class UpdateFeaturestoreOperationMetadata(proto.Message): - r"""Details of operations that perform update Featurestore. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - Operation metadata for Featurestore. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class ImportFeatureValuesOperationMetadata(proto.Message): - r"""Details of operations that perform import Feature values. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - Operation metadata for Featurestore import - Feature values. 
- imported_entity_count (int): - Number of entities that have been imported by - the operation. - imported_feature_value_count (int): - Number of Feature values that have been - imported by the operation. - invalid_row_count (int): - The number of rows in input source that weren't imported due - to either - - - Not having any featureValues. - - Having a null entityId. - - Having a null timestamp. - - Not being parsable (applicable for CSV sources). - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - imported_entity_count = proto.Field( - proto.INT64, - number=2, - ) - imported_feature_value_count = proto.Field( - proto.INT64, - number=3, - ) - invalid_row_count = proto.Field( - proto.INT64, - number=6, - ) - - -class ExportFeatureValuesOperationMetadata(proto.Message): - r"""Details of operations that exports Features values. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - Operation metadata for Featurestore export - Feature values. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class BatchReadFeatureValuesOperationMetadata(proto.Message): - r"""Details of operations that batch reads Feature values. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - Operation metadata for Featurestore batch - read Features values. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class CreateEntityTypeOperationMetadata(proto.Message): - r"""Details of operations that perform create EntityType. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - Operation metadata for EntityType. 
- """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class CreateFeatureOperationMetadata(proto.Message): - r"""Details of operations that perform create Feature. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - Operation metadata for Feature. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class BatchCreateFeaturesOperationMetadata(proto.Message): - r"""Details of operations that perform batch create Features. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - Operation metadata for Feature. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py deleted file mode 100644 index c402d8b609..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/hyperparameter_tuning_job.py +++ /dev/null @@ -1,182 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import custom_job -from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1.types import job_state -from google.cloud.aiplatform_v1.types import study -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'HyperparameterTuningJob', - }, -) - - -class HyperparameterTuningJob(proto.Message): - r"""Represents a HyperparameterTuningJob. A - HyperparameterTuningJob has a Study specification and multiple - CustomJobs with identical CustomJob specification. - - Attributes: - name (str): - Output only. Resource name of the - HyperparameterTuningJob. - display_name (str): - Required. The display name of the - HyperparameterTuningJob. The name can be up to - 128 characters long and can be consist of any - UTF-8 characters. - study_spec (google.cloud.aiplatform_v1.types.StudySpec): - Required. Study configuration of the - HyperparameterTuningJob. - max_trial_count (int): - Required. The desired total number of Trials. - parallel_trial_count (int): - Required. The desired number of Trials to run - in parallel. - max_failed_trial_count (int): - The number of failed Trials that need to be - seen before failing the HyperparameterTuningJob. - If set to 0, Vertex AI decides how many Trials - must fail before the whole job fails. - trial_job_spec (google.cloud.aiplatform_v1.types.CustomJobSpec): - Required. The spec of a trial job. The same - spec applies to the CustomJobs created in all - the trials. - trials (Sequence[google.cloud.aiplatform_v1.types.Trial]): - Output only. Trials of the - HyperparameterTuningJob. - state (google.cloud.aiplatform_v1.types.JobState): - Output only. The detailed state of the job. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. 
Time when the - HyperparameterTuningJob was created. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the HyperparameterTuningJob for the - first time entered the ``JOB_STATE_RUNNING`` state. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the HyperparameterTuningJob entered - any of the following states: ``JOB_STATE_SUCCEEDED``, - ``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the - HyperparameterTuningJob was most recently - updated. - error (google.rpc.status_pb2.Status): - Output only. Only populated when job's state is - JOB_STATE_FAILED or JOB_STATE_CANCELLED. - labels (Sequence[google.cloud.aiplatform_v1.types.HyperparameterTuningJob.LabelsEntry]): - The labels with user-defined metadata to - organize HyperparameterTuningJobs. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): - Customer-managed encryption key options for a - HyperparameterTuningJob. If this is set, then - all resources created by the - HyperparameterTuningJob will be encrypted with - the provided encryption key. 
- """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - study_spec = proto.Field( - proto.MESSAGE, - number=4, - message=study.StudySpec, - ) - max_trial_count = proto.Field( - proto.INT32, - number=5, - ) - parallel_trial_count = proto.Field( - proto.INT32, - number=6, - ) - max_failed_trial_count = proto.Field( - proto.INT32, - number=7, - ) - trial_job_spec = proto.Field( - proto.MESSAGE, - number=8, - message=custom_job.CustomJobSpec, - ) - trials = proto.RepeatedField( - proto.MESSAGE, - number=9, - message=study.Trial, - ) - state = proto.Field( - proto.ENUM, - number=10, - enum=job_state.JobState, - ) - create_time = proto.Field( - proto.MESSAGE, - number=11, - message=timestamp_pb2.Timestamp, - ) - start_time = proto.Field( - proto.MESSAGE, - number=12, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=13, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=14, - message=timestamp_pb2.Timestamp, - ) - error = proto.Field( - proto.MESSAGE, - number=15, - message=status_pb2.Status, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=16, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=17, - message=gca_encryption_spec.EncryptionSpec, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index.py deleted file mode 100644 index b7bf828cd2..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index.py +++ /dev/null @@ -1,142 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import deployed_index_ref -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'Index', - }, -) - - -class Index(proto.Message): - r"""A representation of a collection of database items organized - in a way that allows for approximate nearest neighbor (a.k.a - ANN) algorithms search. - - Attributes: - name (str): - Output only. The resource name of the Index. - display_name (str): - Required. The display name of the Index. - The name can be up to 128 characters long and - can be consist of any UTF-8 characters. - description (str): - The description of the Index. - metadata_schema_uri (str): - Immutable. Points to a YAML file stored on Google Cloud - Storage describing additional information about the Index, - that is specific to it. Unset if the Index does not have any - additional information. The schema is defined as an OpenAPI - 3.0.2 `Schema - Object `__. - Note: The URI given on output will be immutable and probably - different, including the URI scheme, than the one given on - input. The output URI will point to a location where the - user only has a read access. - metadata (google.protobuf.struct_pb2.Value): - An additional information about the Index; the schema of the - metadata can be found in - [metadata_schema][google.cloud.aiplatform.v1.Index.metadata_schema_uri]. 
- deployed_indexes (Sequence[google.cloud.aiplatform_v1.types.DeployedIndexRef]): - Output only. The pointers to DeployedIndexes - created from this Index. An Index can be only - deleted if all its DeployedIndexes had been - undeployed first. - etag (str): - Used to perform consistent read-modify-write - updates. If not set, a blind "overwrite" update - happens. - labels (Sequence[google.cloud.aiplatform_v1.types.Index.LabelsEntry]): - The labels with user-defined metadata to - organize your Indexes. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Index was - created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Index was most recently - updated. This also includes any update to the contents of - the Index. Note that Operations working on this Index may - have their - [Operations.metadata.generic_metadata.update_time] - [google.cloud.aiplatform.v1.GenericOperationMetadata.update_time] - a little after the value of this timestamp, yet that does - not mean their results are not already reflected in the - Index. Result of any successfully completed Operation on the - Index is reflected in it. 
- """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=3, - ) - metadata_schema_uri = proto.Field( - proto.STRING, - number=4, - ) - metadata = proto.Field( - proto.MESSAGE, - number=6, - message=struct_pb2.Value, - ) - deployed_indexes = proto.RepeatedField( - proto.MESSAGE, - number=7, - message=deployed_index_ref.DeployedIndexRef, - ) - etag = proto.Field( - proto.STRING, - number=8, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=9, - ) - create_time = proto.Field( - proto.MESSAGE, - number=10, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=11, - message=timestamp_pb2.Timestamp, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index_endpoint.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index_endpoint.py deleted file mode 100644 index f1f87ebf25..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index_endpoint.py +++ /dev/null @@ -1,371 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import machine_resources -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'IndexEndpoint', - 'DeployedIndex', - 'DeployedIndexAuthConfig', - 'IndexPrivateEndpoints', - }, -) - - -class IndexEndpoint(proto.Message): - r"""Indexes are deployed into it. An IndexEndpoint can have - multiple DeployedIndexes. - - Attributes: - name (str): - Output only. The resource name of the - IndexEndpoint. - display_name (str): - Required. The display name of the - IndexEndpoint. The name can be up to 128 - characters long and can consist of any UTF-8 - characters. - description (str): - The description of the IndexEndpoint. - deployed_indexes (Sequence[google.cloud.aiplatform_v1.types.DeployedIndex]): - Output only. The indexes deployed in this - endpoint. - etag (str): - Used to perform consistent read-modify-write - updates. If not set, a blind "overwrite" update - happens. - labels (Sequence[google.cloud.aiplatform_v1.types.IndexEndpoint.LabelsEntry]): - The labels with user-defined metadata to - organize your IndexEndpoints. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - IndexEndpoint was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - IndexEndpoint was last updated. This timestamp - is not updated when the endpoint's - DeployedIndexes are updated, e.g. due to updates - of the original Indexes they are the deployments - of. - network (str): - Optional. 
The full name of the Google Compute Engine - `network `__ - to which the IndexEndpoint should be peered. - - Private services access must already be configured for the - network. If left unspecified, the Endpoint is not peered - with any network. - - Only one of the fields, - [network][google.cloud.aiplatform.v1.IndexEndpoint.network] - or - [enable_private_service_connect][google.cloud.aiplatform.v1.IndexEndpoint.enable_private_service_connect], - can be set. - - `Format `__: - projects/{project}/global/networks/{network}. Where - {project} is a project number, as in '12345', and {network} - is network name. - enable_private_service_connect (bool): - Optional. If true, expose the IndexEndpoint via private - service connect. - - Only one of the fields, - [network][google.cloud.aiplatform.v1.IndexEndpoint.network] - or - [enable_private_service_connect][google.cloud.aiplatform.v1.IndexEndpoint.enable_private_service_connect], - can be set. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=3, - ) - deployed_indexes = proto.RepeatedField( - proto.MESSAGE, - number=4, - message='DeployedIndex', - ) - etag = proto.Field( - proto.STRING, - number=5, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=6, - ) - create_time = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=8, - message=timestamp_pb2.Timestamp, - ) - network = proto.Field( - proto.STRING, - number=9, - ) - enable_private_service_connect = proto.Field( - proto.BOOL, - number=10, - ) - - -class DeployedIndex(proto.Message): - r"""A deployment of an Index. IndexEndpoints contain one or more - DeployedIndexes. - - Attributes: - id (str): - Required. The user specified ID of the - DeployedIndex. 
The ID can be up to 128 - characters long and must start with a letter and - only contain letters, numbers, and underscores. - The ID must be unique within the project it is - created in. - index (str): - Required. The name of the Index this is the - deployment of. We may refer to this Index as the - DeployedIndex's "original" Index. - display_name (str): - The display name of the DeployedIndex. If not provided upon - creation, the Index's display_name is used. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when the DeployedIndex - was created. - private_endpoints (google.cloud.aiplatform_v1.types.IndexPrivateEndpoints): - Output only. Provides paths for users to send requests - directly to the deployed index services running on Cloud via - private services access. This field is populated if - [network][google.cloud.aiplatform.v1.IndexEndpoint.network] - is configured. - index_sync_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. The DeployedIndex may depend on various data on - its original Index. Additionally when certain changes to the - original Index are being done (e.g. when what the Index - contains is being changed) the DeployedIndex may be - asynchronously updated in the background to reflect this - changes. If this timestamp's value is at least the - [Index.update_time][google.cloud.aiplatform.v1.Index.update_time] - of the original Index, it means that this DeployedIndex and - the original Index are in sync. If this timestamp is older, - then to see which updates this DeployedIndex already - contains (and which not), one must - [list][Operations.ListOperations] [Operations][Operation] - [working][Operation.name] on the original Index. Only the - successfully completed Operations with - [Operations.metadata.generic_metadata.update_time] - [google.cloud.aiplatform.v1.GenericOperationMetadata.update_time] - equal or before this sync time are contained in this - DeployedIndex. 
- automatic_resources (google.cloud.aiplatform_v1.types.AutomaticResources): - Optional. A description of resources that the DeployedIndex - uses, which to large degree are decided by Vertex AI, and - optionally allows only a modest additional configuration. If - min_replica_count is not set, the default value is 2 (we - don't provide SLA when min_replica_count=1). If - max_replica_count is not set, the default value is - min_replica_count. The max allowed replica count is 1000. - enable_access_logging (bool): - Optional. If true, private endpoint's access - logs are sent to StackDriver Logging. - These logs are like standard server access logs, - containing information like timestamp and - latency for each MatchRequest. - Note that Stackdriver logs may incur a cost, - especially if the deployed index receives a high - queries per second rate (QPS). Estimate your - costs before enabling this option. - deployed_index_auth_config (google.cloud.aiplatform_v1.types.DeployedIndexAuthConfig): - Optional. If set, the authentication is - enabled for the private endpoint. - reserved_ip_ranges (Sequence[str]): - Optional. A list of reserved ip ranges under - the VPC network that can be used for this - DeployedIndex. - If set, we will deploy the index within the - provided ip ranges. Otherwise, the index might - be deployed to any ip ranges under the provided - VPC network. - - The value sohuld be the name of the address - (https://cloud.google.com/compute/docs/reference/rest/v1/addresses) - Example: 'vertex-ai-ip-range'. - deployment_group (str): - Optional. The deployment group can be no longer than 64 - characters (eg: 'test', 'prod'). If not set, we will use the - 'default' deployment group. - - Creating ``deployment_groups`` with ``reserved_ip_ranges`` - is a recommended practice when the peered network has - multiple peering ranges. This creates your deployments from - predictable IP spaces for easier traffic administration. 
- Also, one deployment_group (except 'default') can only be - used with the same reserved_ip_ranges which means if the - deployment_group has been used with reserved_ip_ranges: [a, - b, c], using it with [a, b] or [d, e] is disallowed. - - Note: we only support up to 5 deployment groups(not - including 'default'). - """ - - id = proto.Field( - proto.STRING, - number=1, - ) - index = proto.Field( - proto.STRING, - number=2, - ) - display_name = proto.Field( - proto.STRING, - number=3, - ) - create_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - private_endpoints = proto.Field( - proto.MESSAGE, - number=5, - message='IndexPrivateEndpoints', - ) - index_sync_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - automatic_resources = proto.Field( - proto.MESSAGE, - number=7, - message=machine_resources.AutomaticResources, - ) - enable_access_logging = proto.Field( - proto.BOOL, - number=8, - ) - deployed_index_auth_config = proto.Field( - proto.MESSAGE, - number=9, - message='DeployedIndexAuthConfig', - ) - reserved_ip_ranges = proto.RepeatedField( - proto.STRING, - number=10, - ) - deployment_group = proto.Field( - proto.STRING, - number=11, - ) - - -class DeployedIndexAuthConfig(proto.Message): - r"""Used to set up the auth on the DeployedIndex's private - endpoint. - - Attributes: - auth_provider (google.cloud.aiplatform_v1.types.DeployedIndexAuthConfig.AuthProvider): - Defines the authentication provider that the - DeployedIndex uses. - """ - - class AuthProvider(proto.Message): - r"""Configuration for an authentication provider, including support for - `JSON Web Token - (JWT) `__. - - Attributes: - audiences (Sequence[str]): - The list of JWT - `audiences `__. - that are allowed to access. A JWT containing any of these - audiences will be accepted. - allowed_issuers (Sequence[str]): - A list of allowed JWT issuers. 
Each entry must be a valid - Google service account, in the following format: - - ``service-account-name@project-id.iam.gserviceaccount.com`` - """ - - audiences = proto.RepeatedField( - proto.STRING, - number=1, - ) - allowed_issuers = proto.RepeatedField( - proto.STRING, - number=2, - ) - - auth_provider = proto.Field( - proto.MESSAGE, - number=1, - message=AuthProvider, - ) - - -class IndexPrivateEndpoints(proto.Message): - r"""IndexPrivateEndpoints proto is used to provide paths for users to - send requests via private endpoints (e.g. private service access, - private service connect). To send request via private service - access, use match_grpc_address. To send request via private service - connect, use service_attachment. - - Attributes: - match_grpc_address (str): - Output only. The ip address used to send - match gRPC requests. - service_attachment (str): - Output only. The name of the service - attachment resource. Populated if private - service connect is enabled. - """ - - match_grpc_address = proto.Field( - proto.STRING, - number=1, - ) - service_attachment = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index_endpoint_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index_endpoint_service.py deleted file mode 100644 index f369b53380..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index_endpoint_service.py +++ /dev/null @@ -1,419 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint -from google.cloud.aiplatform_v1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'CreateIndexEndpointRequest', - 'CreateIndexEndpointOperationMetadata', - 'GetIndexEndpointRequest', - 'ListIndexEndpointsRequest', - 'ListIndexEndpointsResponse', - 'UpdateIndexEndpointRequest', - 'DeleteIndexEndpointRequest', - 'DeployIndexRequest', - 'DeployIndexResponse', - 'DeployIndexOperationMetadata', - 'UndeployIndexRequest', - 'UndeployIndexResponse', - 'UndeployIndexOperationMetadata', - 'MutateDeployedIndexRequest', - 'MutateDeployedIndexResponse', - 'MutateDeployedIndexOperationMetadata', - }, -) - - -class CreateIndexEndpointRequest(proto.Message): - r"""Request message for - [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1.IndexEndpointService.CreateIndexEndpoint]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - IndexEndpoint in. Format: - ``projects/{project}/locations/{location}`` - index_endpoint (google.cloud.aiplatform_v1.types.IndexEndpoint): - Required. The IndexEndpoint to create. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - index_endpoint = proto.Field( - proto.MESSAGE, - number=2, - message=gca_index_endpoint.IndexEndpoint, - ) - - -class CreateIndexEndpointOperationMetadata(proto.Message): - r"""Runtime operation information for - [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1.IndexEndpointService.CreateIndexEndpoint]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The operation generic information. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class GetIndexEndpointRequest(proto.Message): - r"""Request message for - [IndexEndpointService.GetIndexEndpoint][google.cloud.aiplatform.v1.IndexEndpointService.GetIndexEndpoint] - - Attributes: - name (str): - Required. The name of the IndexEndpoint resource. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListIndexEndpointsRequest(proto.Message): - r"""Request message for - [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1.IndexEndpointService.ListIndexEndpoints]. - - Attributes: - parent (str): - Required. The resource name of the Location from which to - list the IndexEndpoints. Format: - ``projects/{project}/locations/{location}`` - filter (str): - Optional. An expression for filtering the results of the - request. For field names both snake_case and camelCase are - supported. - - - ``index_endpoint`` supports = and !=. ``index_endpoint`` - represents the IndexEndpoint ID, ie. the last segment of - the IndexEndpoint's - [resourcename][google.cloud.aiplatform.v1.IndexEndpoint.name]. 
- - ``display_name`` supports =, != and regex() (uses - `re2 `__ - syntax) - - ``labels`` supports general map functions that is: - ``labels.key=value`` - key:value equality - ``labels.key:* or labels:key - key existence A key including a space must be quoted.``\ labels."a - key"`. - - Some examples: - - - ``index_endpoint="1"`` - - ``display_name="myDisplayName"`` - - \`regex(display_name, "^A") -> The display name starts - with an A. - - ``labels.myKey="myValue"`` - page_size (int): - Optional. The standard list page size. - page_token (str): - Optional. The standard list page token. Typically obtained - via - [ListIndexEndpointsResponse.next_page_token][google.cloud.aiplatform.v1.ListIndexEndpointsResponse.next_page_token] - of the previous - [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1.IndexEndpointService.ListIndexEndpoints] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Optional. Mask specifying which fields to - read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListIndexEndpointsResponse(proto.Message): - r"""Response message for - [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1.IndexEndpointService.ListIndexEndpoints]. - - Attributes: - index_endpoints (Sequence[google.cloud.aiplatform_v1.types.IndexEndpoint]): - List of IndexEndpoints in the requested page. - next_page_token (str): - A token to retrieve next page of results. Pass to - [ListIndexEndpointsRequest.page_token][google.cloud.aiplatform.v1.ListIndexEndpointsRequest.page_token] - to obtain that page. 
- """ - - @property - def raw_page(self): - return self - - index_endpoints = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_index_endpoint.IndexEndpoint, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateIndexEndpointRequest(proto.Message): - r"""Request message for - [IndexEndpointService.UpdateIndexEndpoint][google.cloud.aiplatform.v1.IndexEndpointService.UpdateIndexEndpoint]. - - Attributes: - index_endpoint (google.cloud.aiplatform_v1.types.IndexEndpoint): - Required. The IndexEndpoint which replaces - the resource on the server. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. See - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - """ - - index_endpoint = proto.Field( - proto.MESSAGE, - number=1, - message=gca_index_endpoint.IndexEndpoint, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeleteIndexEndpointRequest(proto.Message): - r"""Request message for - [IndexEndpointService.DeleteIndexEndpoint][google.cloud.aiplatform.v1.IndexEndpointService.DeleteIndexEndpoint]. - - Attributes: - name (str): - Required. The name of the IndexEndpoint resource to be - deleted. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class DeployIndexRequest(proto.Message): - r"""Request message for - [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.DeployIndex]. - - Attributes: - index_endpoint (str): - Required. The name of the IndexEndpoint resource into which - to deploy an Index. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - deployed_index (google.cloud.aiplatform_v1.types.DeployedIndex): - Required. The DeployedIndex to be created - within the IndexEndpoint. 
- """ - - index_endpoint = proto.Field( - proto.STRING, - number=1, - ) - deployed_index = proto.Field( - proto.MESSAGE, - number=2, - message=gca_index_endpoint.DeployedIndex, - ) - - -class DeployIndexResponse(proto.Message): - r"""Response message for - [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.DeployIndex]. - - Attributes: - deployed_index (google.cloud.aiplatform_v1.types.DeployedIndex): - The DeployedIndex that had been deployed in - the IndexEndpoint. - """ - - deployed_index = proto.Field( - proto.MESSAGE, - number=1, - message=gca_index_endpoint.DeployedIndex, - ) - - -class DeployIndexOperationMetadata(proto.Message): - r"""Runtime operation information for - [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.DeployIndex]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The operation generic information. - deployed_index_id (str): - The unique index id specified by user - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - deployed_index_id = proto.Field( - proto.STRING, - number=2, - ) - - -class UndeployIndexRequest(proto.Message): - r"""Request message for - [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.UndeployIndex]. - - Attributes: - index_endpoint (str): - Required. The name of the IndexEndpoint resource from which - to undeploy an Index. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - deployed_index_id (str): - Required. The ID of the DeployedIndex to be - undeployed from the IndexEndpoint. 
- """ - - index_endpoint = proto.Field( - proto.STRING, - number=1, - ) - deployed_index_id = proto.Field( - proto.STRING, - number=2, - ) - - -class UndeployIndexResponse(proto.Message): - r"""Response message for - [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.UndeployIndex]. - - """ - - -class UndeployIndexOperationMetadata(proto.Message): - r"""Runtime operation information for - [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1.IndexEndpointService.UndeployIndex]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The operation generic information. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class MutateDeployedIndexRequest(proto.Message): - r"""Request message for - [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex]. - - Attributes: - index_endpoint (str): - Required. The name of the IndexEndpoint resource into which - to deploy an Index. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - deployed_index (google.cloud.aiplatform_v1.types.DeployedIndex): - Required. The DeployedIndex to be updated within the - IndexEndpoint. Currently, the updatable fields are - [DeployedIndex][automatic_resources] and - [DeployedIndex][dedicated_resources] - """ - - index_endpoint = proto.Field( - proto.STRING, - number=1, - ) - deployed_index = proto.Field( - proto.MESSAGE, - number=2, - message=gca_index_endpoint.DeployedIndex, - ) - - -class MutateDeployedIndexResponse(proto.Message): - r"""Response message for - [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex]. - - Attributes: - deployed_index (google.cloud.aiplatform_v1.types.DeployedIndex): - The DeployedIndex that had been updated in - the IndexEndpoint. 
- """ - - deployed_index = proto.Field( - proto.MESSAGE, - number=1, - message=gca_index_endpoint.DeployedIndex, - ) - - -class MutateDeployedIndexOperationMetadata(proto.Message): - r"""Runtime operation information for - [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The operation generic information. - deployed_index_id (str): - The unique index id specified by user - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - deployed_index_id = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index_service.py deleted file mode 100644 index 85c0c87644..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/index_service.py +++ /dev/null @@ -1,362 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import index as gca_index -from google.cloud.aiplatform_v1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'CreateIndexRequest', - 'CreateIndexOperationMetadata', - 'GetIndexRequest', - 'ListIndexesRequest', - 'ListIndexesResponse', - 'UpdateIndexRequest', - 'UpdateIndexOperationMetadata', - 'DeleteIndexRequest', - 'NearestNeighborSearchOperationMetadata', - }, -) - - -class CreateIndexRequest(proto.Message): - r"""Request message for - [IndexService.CreateIndex][google.cloud.aiplatform.v1.IndexService.CreateIndex]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - Index in. Format: - ``projects/{project}/locations/{location}`` - index (google.cloud.aiplatform_v1.types.Index): - Required. The Index to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - index = proto.Field( - proto.MESSAGE, - number=2, - message=gca_index.Index, - ) - - -class CreateIndexOperationMetadata(proto.Message): - r"""Runtime operation information for - [IndexService.CreateIndex][google.cloud.aiplatform.v1.IndexService.CreateIndex]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The operation generic information. - nearest_neighbor_search_operation_metadata (google.cloud.aiplatform_v1.types.NearestNeighborSearchOperationMetadata): - The operation metadata with regard to - Matching Engine Index operation. 
- """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - nearest_neighbor_search_operation_metadata = proto.Field( - proto.MESSAGE, - number=2, - message='NearestNeighborSearchOperationMetadata', - ) - - -class GetIndexRequest(proto.Message): - r"""Request message for - [IndexService.GetIndex][google.cloud.aiplatform.v1.IndexService.GetIndex] - - Attributes: - name (str): - Required. The name of the Index resource. Format: - ``projects/{project}/locations/{location}/indexes/{index}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListIndexesRequest(proto.Message): - r"""Request message for - [IndexService.ListIndexes][google.cloud.aiplatform.v1.IndexService.ListIndexes]. - - Attributes: - parent (str): - Required. The resource name of the Location from which to - list the Indexes. Format: - ``projects/{project}/locations/{location}`` - filter (str): - The standard list filter. - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListIndexesResponse.next_page_token][google.cloud.aiplatform.v1.ListIndexesResponse.next_page_token] - of the previous - [IndexService.ListIndexes][google.cloud.aiplatform.v1.IndexService.ListIndexes] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListIndexesResponse(proto.Message): - r"""Response message for - [IndexService.ListIndexes][google.cloud.aiplatform.v1.IndexService.ListIndexes]. 
- - Attributes: - indexes (Sequence[google.cloud.aiplatform_v1.types.Index]): - List of indexes in the requested page. - next_page_token (str): - A token to retrieve next page of results. Pass to - [ListIndexesRequest.page_token][google.cloud.aiplatform.v1.ListIndexesRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - indexes = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_index.Index, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateIndexRequest(proto.Message): - r"""Request message for - [IndexService.UpdateIndex][google.cloud.aiplatform.v1.IndexService.UpdateIndex]. - - Attributes: - index (google.cloud.aiplatform_v1.types.Index): - Required. The Index which updates the - resource on the server. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - The update mask applies to the resource. For the - ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - """ - - index = proto.Field( - proto.MESSAGE, - number=1, - message=gca_index.Index, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class UpdateIndexOperationMetadata(proto.Message): - r"""Runtime operation information for - [IndexService.UpdateIndex][google.cloud.aiplatform.v1.IndexService.UpdateIndex]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The operation generic information. - nearest_neighbor_search_operation_metadata (google.cloud.aiplatform_v1.types.NearestNeighborSearchOperationMetadata): - The operation metadata with regard to - Matching Engine Index operation. 
- """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - nearest_neighbor_search_operation_metadata = proto.Field( - proto.MESSAGE, - number=2, - message='NearestNeighborSearchOperationMetadata', - ) - - -class DeleteIndexRequest(proto.Message): - r"""Request message for - [IndexService.DeleteIndex][google.cloud.aiplatform.v1.IndexService.DeleteIndex]. - - Attributes: - name (str): - Required. The name of the Index resource to be deleted. - Format: - ``projects/{project}/locations/{location}/indexes/{index}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class NearestNeighborSearchOperationMetadata(proto.Message): - r"""Runtime operation metadata with regard to Matching Engine - Index. - - Attributes: - content_validation_stats (Sequence[google.cloud.aiplatform_v1.types.NearestNeighborSearchOperationMetadata.ContentValidationStats]): - The validation stats of the content (per file) to be - inserted or updated on the Matching Engine Index resource. - Populated if contentsDeltaUri is provided as part of - [Index.metadata][google.cloud.aiplatform.v1.Index.metadata]. - Please note that, currently for those files that are broken - or has unsupported file format, we will not have the stats - for those files. - data_bytes_count (int): - The ingested data size in bytes. - """ - - class RecordError(proto.Message): - r""" - - Attributes: - error_type (google.cloud.aiplatform_v1.types.NearestNeighborSearchOperationMetadata.RecordError.RecordErrorType): - The error type of this record. - error_message (str): - A human-readable message that is shown to the user to help - them fix the error. Note that this message may change from - time to time, your code should check against error_type as - the source of truth. - source_gcs_uri (str): - Cloud Storage URI pointing to the original - file in user's bucket. - embedding_id (str): - Empty if the embedding id is failed to parse. 
- raw_record (str): - The original content of this record. - """ - class RecordErrorType(proto.Enum): - r"""""" - ERROR_TYPE_UNSPECIFIED = 0 - EMPTY_LINE = 1 - INVALID_JSON_SYNTAX = 2 - INVALID_CSV_SYNTAX = 3 - INVALID_AVRO_SYNTAX = 4 - INVALID_EMBEDDING_ID = 5 - EMBEDDING_SIZE_MISMATCH = 6 - NAMESPACE_MISSING = 7 - - error_type = proto.Field( - proto.ENUM, - number=1, - enum='NearestNeighborSearchOperationMetadata.RecordError.RecordErrorType', - ) - error_message = proto.Field( - proto.STRING, - number=2, - ) - source_gcs_uri = proto.Field( - proto.STRING, - number=3, - ) - embedding_id = proto.Field( - proto.STRING, - number=4, - ) - raw_record = proto.Field( - proto.STRING, - number=5, - ) - - class ContentValidationStats(proto.Message): - r""" - - Attributes: - source_gcs_uri (str): - Cloud Storage URI pointing to the original - file in user's bucket. - valid_record_count (int): - Number of records in this file that were - successfully processed. - invalid_record_count (int): - Number of records in this file we skipped due - to validate errors. - partial_errors (Sequence[google.cloud.aiplatform_v1.types.NearestNeighborSearchOperationMetadata.RecordError]): - The detail information of the partial - failures encountered for those invalid records - that couldn't be parsed. Up to 50 partial errors - will be reported. 
- """ - - source_gcs_uri = proto.Field( - proto.STRING, - number=1, - ) - valid_record_count = proto.Field( - proto.INT64, - number=2, - ) - invalid_record_count = proto.Field( - proto.INT64, - number=3, - ) - partial_errors = proto.RepeatedField( - proto.MESSAGE, - number=4, - message='NearestNeighborSearchOperationMetadata.RecordError', - ) - - content_validation_stats = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=ContentValidationStats, - ) - data_bytes_count = proto.Field( - proto.INT64, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/io.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/io.py deleted file mode 100644 index 1264dd713e..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/io.py +++ /dev/null @@ -1,198 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'AvroSource', - 'CsvSource', - 'GcsSource', - 'GcsDestination', - 'BigQuerySource', - 'BigQueryDestination', - 'CsvDestination', - 'TFRecordDestination', - 'ContainerRegistryDestination', - }, -) - - -class AvroSource(proto.Message): - r"""The storage details for Avro input content. - - Attributes: - gcs_source (google.cloud.aiplatform_v1.types.GcsSource): - Required. Google Cloud Storage location. 
- """ - - gcs_source = proto.Field( - proto.MESSAGE, - number=1, - message='GcsSource', - ) - - -class CsvSource(proto.Message): - r"""The storage details for CSV input content. - - Attributes: - gcs_source (google.cloud.aiplatform_v1.types.GcsSource): - Required. Google Cloud Storage location. - """ - - gcs_source = proto.Field( - proto.MESSAGE, - number=1, - message='GcsSource', - ) - - -class GcsSource(proto.Message): - r"""The Google Cloud Storage location for the input content. - - Attributes: - uris (Sequence[str]): - Required. Google Cloud Storage URI(-s) to the - input file(s). May contain wildcards. For more - information on wildcards, see - https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. - """ - - uris = proto.RepeatedField( - proto.STRING, - number=1, - ) - - -class GcsDestination(proto.Message): - r"""The Google Cloud Storage location where the output is to be - written to. - - Attributes: - output_uri_prefix (str): - Required. Google Cloud Storage URI to output - directory. If the uri doesn't end with '/', a - '/' will be automatically appended. The - directory is created if it doesn't exist. - """ - - output_uri_prefix = proto.Field( - proto.STRING, - number=1, - ) - - -class BigQuerySource(proto.Message): - r"""The BigQuery location for the input content. - - Attributes: - input_uri (str): - Required. BigQuery URI to a table, up to 2000 characters - long. Accepted forms: - - - BigQuery path. For example: - ``bq://projectId.bqDatasetId.bqTableId``. - """ - - input_uri = proto.Field( - proto.STRING, - number=1, - ) - - -class BigQueryDestination(proto.Message): - r"""The BigQuery location for the output content. - - Attributes: - output_uri (str): - Required. BigQuery URI to a project or table, up to 2000 - characters long. - - When only the project is specified, the Dataset and Table is - created. When the full table reference is specified, the - Dataset must exist and table must not exist. - - Accepted forms: - - - BigQuery path. 
For example: ``bq://projectId`` or - ``bq://projectId.bqDatasetId`` or - ``bq://projectId.bqDatasetId.bqTableId``. - """ - - output_uri = proto.Field( - proto.STRING, - number=1, - ) - - -class CsvDestination(proto.Message): - r"""The storage details for CSV output content. - - Attributes: - gcs_destination (google.cloud.aiplatform_v1.types.GcsDestination): - Required. Google Cloud Storage location. - """ - - gcs_destination = proto.Field( - proto.MESSAGE, - number=1, - message='GcsDestination', - ) - - -class TFRecordDestination(proto.Message): - r"""The storage details for TFRecord output content. - - Attributes: - gcs_destination (google.cloud.aiplatform_v1.types.GcsDestination): - Required. Google Cloud Storage location. - """ - - gcs_destination = proto.Field( - proto.MESSAGE, - number=1, - message='GcsDestination', - ) - - -class ContainerRegistryDestination(proto.Message): - r"""The Container Registry location for the container image. - - Attributes: - output_uri (str): - Required. Container Registry URI of a container image. Only - Google Container Registry and Artifact Registry are - supported now. Accepted forms: - - - Google Container Registry path. For example: - ``gcr.io/projectId/imageName:tag``. - - - Artifact Registry path. For example: - ``us-central1-docker.pkg.dev/projectId/repoName/imageName:tag``. - - If a tag is not specified, "latest" will be used as the - default tag. 
- """ - - output_uri = proto.Field( - proto.STRING, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_service.py deleted file mode 100644 index 3a8b064d8e..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_service.py +++ /dev/null @@ -1,1093 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job -from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job -from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job -from google.cloud.aiplatform_v1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'CreateCustomJobRequest', - 'GetCustomJobRequest', - 'ListCustomJobsRequest', - 'ListCustomJobsResponse', - 'DeleteCustomJobRequest', - 'CancelCustomJobRequest', - 'CreateDataLabelingJobRequest', - 'GetDataLabelingJobRequest', - 'ListDataLabelingJobsRequest', - 'ListDataLabelingJobsResponse', - 'DeleteDataLabelingJobRequest', - 'CancelDataLabelingJobRequest', - 'CreateHyperparameterTuningJobRequest', - 'GetHyperparameterTuningJobRequest', - 'ListHyperparameterTuningJobsRequest', - 'ListHyperparameterTuningJobsResponse', - 'DeleteHyperparameterTuningJobRequest', - 'CancelHyperparameterTuningJobRequest', - 'CreateBatchPredictionJobRequest', - 'GetBatchPredictionJobRequest', - 'ListBatchPredictionJobsRequest', - 'ListBatchPredictionJobsResponse', - 'DeleteBatchPredictionJobRequest', - 'CancelBatchPredictionJobRequest', - 'CreateModelDeploymentMonitoringJobRequest', - 'SearchModelDeploymentMonitoringStatsAnomaliesRequest', - 'SearchModelDeploymentMonitoringStatsAnomaliesResponse', - 'GetModelDeploymentMonitoringJobRequest', - 'ListModelDeploymentMonitoringJobsRequest', - 'ListModelDeploymentMonitoringJobsResponse', - 'UpdateModelDeploymentMonitoringJobRequest', - 'DeleteModelDeploymentMonitoringJobRequest', 
- 'PauseModelDeploymentMonitoringJobRequest', - 'ResumeModelDeploymentMonitoringJobRequest', - 'UpdateModelDeploymentMonitoringJobOperationMetadata', - }, -) - - -class CreateCustomJobRequest(proto.Message): - r"""Request message for - [JobService.CreateCustomJob][google.cloud.aiplatform.v1.JobService.CreateCustomJob]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - CustomJob in. Format: - ``projects/{project}/locations/{location}`` - custom_job (google.cloud.aiplatform_v1.types.CustomJob): - Required. The CustomJob to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - custom_job = proto.Field( - proto.MESSAGE, - number=2, - message=gca_custom_job.CustomJob, - ) - - -class GetCustomJobRequest(proto.Message): - r"""Request message for - [JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob]. - - Attributes: - name (str): - Required. The name of the CustomJob resource. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListCustomJobsRequest(proto.Message): - r"""Request message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs]. - - Attributes: - parent (str): - Required. The resource name of the Location to list the - CustomJobs from. Format: - ``projects/{project}/locations/{location}`` - filter (str): - The standard list filter. - - Supported fields: - - - ``display_name`` supports = and !=. - - - ``state`` supports = and !=. - - Some examples of using the filter are: - - - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` - - - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` - - - ``NOT display_name="my_job"`` - - - ``state="JOB_STATE_FAILED"`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. 
Typically obtained via - [ListCustomJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListCustomJobsResponse.next_page_token] - of the previous - [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListCustomJobsResponse(proto.Message): - r"""Response message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs] - - Attributes: - custom_jobs (Sequence[google.cloud.aiplatform_v1.types.CustomJob]): - List of CustomJobs in the requested page. - next_page_token (str): - A token to retrieve the next page of results. Pass to - [ListCustomJobsRequest.page_token][google.cloud.aiplatform.v1.ListCustomJobsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - custom_jobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_custom_job.CustomJob, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteCustomJobRequest(proto.Message): - r"""Request message for - [JobService.DeleteCustomJob][google.cloud.aiplatform.v1.JobService.DeleteCustomJob]. - - Attributes: - name (str): - Required. The name of the CustomJob resource to be deleted. - Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CancelCustomJobRequest(proto.Message): - r"""Request message for - [JobService.CancelCustomJob][google.cloud.aiplatform.v1.JobService.CancelCustomJob]. - - Attributes: - name (str): - Required. 
The name of the CustomJob to cancel. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateDataLabelingJobRequest(proto.Message): - r"""Request message for - [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1.JobService.CreateDataLabelingJob]. - - Attributes: - parent (str): - Required. The parent of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}`` - data_labeling_job (google.cloud.aiplatform_v1.types.DataLabelingJob): - Required. The DataLabelingJob to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - data_labeling_job = proto.Field( - proto.MESSAGE, - number=2, - message=gca_data_labeling_job.DataLabelingJob, - ) - - -class GetDataLabelingJobRequest(proto.Message): - r"""Request message for - [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1.JobService.GetDataLabelingJob]. - - Attributes: - name (str): - Required. The name of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListDataLabelingJobsRequest(proto.Message): - r"""Request message for - [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. - - Attributes: - parent (str): - Required. The parent of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}`` - filter (str): - The standard list filter. - - Supported fields: - - - ``display_name`` supports = and !=. - - - ``state`` supports = and !=. - - Some examples of using the filter are: - - - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` - - - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` - - - ``NOT display_name="my_job"`` - - - ``state="JOB_STATE_FAILED"`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. 
- read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. FieldMask represents a - set of symbolic field paths. For example, the mask can be - ``paths: "name"``. The "name" here is a field in - DataLabelingJob. If this field is not set, all fields of the - DataLabelingJob are returned. - order_by (str): - A comma-separated list of fields to order by, sorted in - ascending order by default. Use ``desc`` after a field name - for descending. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - order_by = proto.Field( - proto.STRING, - number=6, - ) - - -class ListDataLabelingJobsResponse(proto.Message): - r"""Response message for - [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs]. - - Attributes: - data_labeling_jobs (Sequence[google.cloud.aiplatform_v1.types.DataLabelingJob]): - A list of DataLabelingJobs that matches the - specified filter in the request. - next_page_token (str): - The standard List next-page token. - """ - - @property - def raw_page(self): - return self - - data_labeling_jobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_data_labeling_job.DataLabelingJob, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteDataLabelingJobRequest(proto.Message): - r"""Request message for - [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob]. - - Attributes: - name (str): - Required. The name of the DataLabelingJob to be deleted. 
- Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CancelDataLabelingJobRequest(proto.Message): - r"""Request message for - [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1.JobService.CancelDataLabelingJob]. - - Attributes: - name (str): - Required. The name of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateHyperparameterTuningJobRequest(proto.Message): - r"""Request message for - [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CreateHyperparameterTuningJob]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - HyperparameterTuningJob in. Format: - ``projects/{project}/locations/{location}`` - hyperparameter_tuning_job (google.cloud.aiplatform_v1.types.HyperparameterTuningJob): - Required. The HyperparameterTuningJob to - create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - hyperparameter_tuning_job = proto.Field( - proto.MESSAGE, - number=2, - message=gca_hyperparameter_tuning_job.HyperparameterTuningJob, - ) - - -class GetHyperparameterTuningJobRequest(proto.Message): - r"""Request message for - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob]. - - Attributes: - name (str): - Required. The name of the HyperparameterTuningJob resource. - Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListHyperparameterTuningJobsRequest(proto.Message): - r"""Request message for - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs]. - - Attributes: - parent (str): - Required. 
The resource name of the Location to list the - HyperparameterTuningJobs from. Format: - ``projects/{project}/locations/{location}`` - filter (str): - The standard list filter. - - Supported fields: - - - ``display_name`` supports = and !=. - - - ``state`` supports = and !=. - - Some examples of using the filter are: - - - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` - - - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` - - - ``NOT display_name="my_job"`` - - - ``state="JOB_STATE_FAILED"`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListHyperparameterTuningJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListHyperparameterTuningJobsResponse.next_page_token] - of the previous - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListHyperparameterTuningJobsResponse(proto.Message): - r"""Response message for - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs] - - Attributes: - hyperparameter_tuning_jobs (Sequence[google.cloud.aiplatform_v1.types.HyperparameterTuningJob]): - List of HyperparameterTuningJobs in the requested page. - [HyperparameterTuningJob.trials][google.cloud.aiplatform.v1.HyperparameterTuningJob.trials] - of the jobs will be not be returned. - next_page_token (str): - A token to retrieve the next page of results. 
Pass to - [ListHyperparameterTuningJobsRequest.page_token][google.cloud.aiplatform.v1.ListHyperparameterTuningJobsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - hyperparameter_tuning_jobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_hyperparameter_tuning_job.HyperparameterTuningJob, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteHyperparameterTuningJobRequest(proto.Message): - r"""Request message for - [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob]. - - Attributes: - name (str): - Required. The name of the HyperparameterTuningJob resource - to be deleted. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CancelHyperparameterTuningJobRequest(proto.Message): - r"""Request message for - [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CancelHyperparameterTuningJob]. - - Attributes: - name (str): - Required. The name of the HyperparameterTuningJob to cancel. - Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateBatchPredictionJobRequest(proto.Message): - r"""Request message for - [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CreateBatchPredictionJob]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - BatchPredictionJob in. Format: - ``projects/{project}/locations/{location}`` - batch_prediction_job (google.cloud.aiplatform_v1.types.BatchPredictionJob): - Required. The BatchPredictionJob to create. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - batch_prediction_job = proto.Field( - proto.MESSAGE, - number=2, - message=gca_batch_prediction_job.BatchPredictionJob, - ) - - -class GetBatchPredictionJobRequest(proto.Message): - r"""Request message for - [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob]. - - Attributes: - name (str): - Required. The name of the BatchPredictionJob resource. - Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListBatchPredictionJobsRequest(proto.Message): - r"""Request message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs]. - - Attributes: - parent (str): - Required. The resource name of the Location to list the - BatchPredictionJobs from. Format: - ``projects/{project}/locations/{location}`` - filter (str): - The standard list filter. - - Supported fields: - - - ``display_name`` supports = and !=. - - - ``state`` supports = and !=. - - - ``model_display_name`` supports = and != - - Some examples of using the filter are: - - - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` - - - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` - - - ``NOT display_name="my_job"`` - - - ``state="JOB_STATE_FAILED"`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListBatchPredictionJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListBatchPredictionJobsResponse.next_page_token] - of the previous - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListBatchPredictionJobsResponse(proto.Message): - r"""Response message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs] - - Attributes: - batch_prediction_jobs (Sequence[google.cloud.aiplatform_v1.types.BatchPredictionJob]): - List of BatchPredictionJobs in the requested - page. - next_page_token (str): - A token to retrieve the next page of results. Pass to - [ListBatchPredictionJobsRequest.page_token][google.cloud.aiplatform.v1.ListBatchPredictionJobsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - batch_prediction_jobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_batch_prediction_job.BatchPredictionJob, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteBatchPredictionJobRequest(proto.Message): - r"""Request message for - [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1.JobService.DeleteBatchPredictionJob]. - - Attributes: - name (str): - Required. The name of the BatchPredictionJob resource to be - deleted. Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CancelBatchPredictionJobRequest(proto.Message): - r"""Request message for - [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CancelBatchPredictionJob]. - - Attributes: - name (str): - Required. The name of the BatchPredictionJob to cancel. 
- Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateModelDeploymentMonitoringJobRequest(proto.Message): - r"""Request message for - [JobService.CreateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.CreateModelDeploymentMonitoringJob]. - - Attributes: - parent (str): - Required. The parent of the ModelDeploymentMonitoringJob. - Format: ``projects/{project}/locations/{location}`` - model_deployment_monitoring_job (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob): - Required. The ModelDeploymentMonitoringJob to - create - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - model_deployment_monitoring_job = proto.Field( - proto.MESSAGE, - number=2, - message=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, - ) - - -class SearchModelDeploymentMonitoringStatsAnomaliesRequest(proto.Message): - r"""Request message for - [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. - - Attributes: - model_deployment_monitoring_job (str): - Required. ModelDeploymentMonitoring Job resource name. - Format: - \`projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job} - deployed_model_id (str): - Required. The DeployedModel ID of the - [ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. - feature_display_name (str): - The feature display name. If specified, only return the - stats belonging to this feature. Format: - [ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.feature_display_name][google.cloud.aiplatform.v1.ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.feature_display_name], - example: "user_destination". 
- objectives (Sequence[google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest.StatsAnomaliesObjective]): - Required. Objectives of the stats to - retrieve. - page_size (int): - The standard list page size. - page_token (str): - A page token received from a previous - [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies] - call. - start_time (google.protobuf.timestamp_pb2.Timestamp): - The earliest timestamp of stats being - generated. If not set, indicates fetching stats - till the earliest possible one. - end_time (google.protobuf.timestamp_pb2.Timestamp): - The latest timestamp of stats being - generated. If not set, indicates feching stats - till the latest possible one. - """ - - class StatsAnomaliesObjective(proto.Message): - r"""Stats requested for specific objective. - - Attributes: - type_ (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringObjectiveType): - - top_feature_count (int): - If set, all attribution scores between - [SearchModelDeploymentMonitoringStatsAnomaliesRequest.start_time][google.cloud.aiplatform.v1.SearchModelDeploymentMonitoringStatsAnomaliesRequest.start_time] - and - [SearchModelDeploymentMonitoringStatsAnomaliesRequest.end_time][google.cloud.aiplatform.v1.SearchModelDeploymentMonitoringStatsAnomaliesRequest.end_time] - are fetched, and page token doesn't take affect in this - case. Only used to retrieve attribution score for the top - Features which has the highest attribution score in the - latest monitoring run. 
- """ - - type_ = proto.Field( - proto.ENUM, - number=1, - enum=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringObjectiveType, - ) - top_feature_count = proto.Field( - proto.INT32, - number=4, - ) - - model_deployment_monitoring_job = proto.Field( - proto.STRING, - number=1, - ) - deployed_model_id = proto.Field( - proto.STRING, - number=2, - ) - feature_display_name = proto.Field( - proto.STRING, - number=3, - ) - objectives = proto.RepeatedField( - proto.MESSAGE, - number=4, - message=StatsAnomaliesObjective, - ) - page_size = proto.Field( - proto.INT32, - number=5, - ) - page_token = proto.Field( - proto.STRING, - number=6, - ) - start_time = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=8, - message=timestamp_pb2.Timestamp, - ) - - -class SearchModelDeploymentMonitoringStatsAnomaliesResponse(proto.Message): - r"""Response message for - [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. - - Attributes: - monitoring_stats (Sequence[google.cloud.aiplatform_v1.types.ModelMonitoringStatsAnomalies]): - Stats retrieved for requested objectives. There are at most - 1000 - [ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.prediction_stats][google.cloud.aiplatform.v1.ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.prediction_stats] - in the response. - next_page_token (str): - The page token that can be used by the next - [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies] - call. 
- """ - - @property - def raw_page(self): - return self - - monitoring_stats = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class GetModelDeploymentMonitoringJobRequest(proto.Message): - r"""Request message for - [JobService.GetModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.GetModelDeploymentMonitoringJob]. - - Attributes: - name (str): - Required. The resource name of the - ModelDeploymentMonitoringJob. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListModelDeploymentMonitoringJobsRequest(proto.Message): - r"""Request message for - [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1.JobService.ListModelDeploymentMonitoringJobs]. - - Attributes: - parent (str): - Required. The parent of the ModelDeploymentMonitoringJob. - Format: ``projects/{project}/locations/{location}`` - filter (str): - The standard list filter. - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListModelDeploymentMonitoringJobsResponse(proto.Message): - r"""Response message for - [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1.JobService.ListModelDeploymentMonitoringJobs]. 
- - Attributes: - model_deployment_monitoring_jobs (Sequence[google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob]): - A list of ModelDeploymentMonitoringJobs that - matches the specified filter in the request. - next_page_token (str): - The standard List next-page token. - """ - - @property - def raw_page(self): - return self - - model_deployment_monitoring_jobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateModelDeploymentMonitoringJobRequest(proto.Message): - r"""Request message for - [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.UpdateModelDeploymentMonitoringJob]. - - Attributes: - model_deployment_monitoring_job (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob): - Required. The model monitoring configuration - which replaces the resource on the server. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask is used to specify the fields to - be overwritten in the ModelDeploymentMonitoringJob resource - by the update. The fields specified in the update_mask are - relative to the resource, not the full request. A field will - be overwritten if it is in the mask. If the user does not - provide a mask then only the non-empty fields present in the - request will be overwritten. Set the update_mask to ``*`` to - override all fields. For the objective config, the user can - either provide the update mask for - model_deployment_monitoring_objective_configs or any - combination of its nested fields, such as: - model_deployment_monitoring_objective_configs.objective_config.training_dataset. 
- - Updatable fields: - - - ``display_name`` - - ``model_deployment_monitoring_schedule_config`` - - ``model_monitoring_alert_config`` - - ``logging_sampling_strategy`` - - ``labels`` - - ``log_ttl`` - - ``enable_monitoring_pipeline_logs`` . and - - ``model_deployment_monitoring_objective_configs`` . or - - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset`` - - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config`` - - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config`` - """ - - model_deployment_monitoring_job = proto.Field( - proto.MESSAGE, - number=1, - message=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeleteModelDeploymentMonitoringJobRequest(proto.Message): - r"""Request message for - [JobService.DeleteModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.DeleteModelDeploymentMonitoringJob]. - - Attributes: - name (str): - Required. The resource name of the model monitoring job to - delete. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class PauseModelDeploymentMonitoringJobRequest(proto.Message): - r"""Request message for - [JobService.PauseModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.PauseModelDeploymentMonitoringJob]. - - Attributes: - name (str): - Required. The resource name of the - ModelDeploymentMonitoringJob to pause. 
Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ResumeModelDeploymentMonitoringJobRequest(proto.Message): - r"""Request message for - [JobService.ResumeModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.ResumeModelDeploymentMonitoringJob]. - - Attributes: - name (str): - Required. The resource name of the - ModelDeploymentMonitoringJob to resume. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class UpdateModelDeploymentMonitoringJobOperationMetadata(proto.Message): - r"""Runtime operation information for - [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1.JobService.UpdateModelDeploymentMonitoringJob]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The operation generic information. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_state.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_state.py deleted file mode 100644 index e28545fb22..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/job_state.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'JobState', - }, -) - - -class JobState(proto.Enum): - r"""Describes the state of a job.""" - JOB_STATE_UNSPECIFIED = 0 - JOB_STATE_QUEUED = 1 - JOB_STATE_PENDING = 2 - JOB_STATE_RUNNING = 3 - JOB_STATE_SUCCEEDED = 4 - JOB_STATE_FAILED = 5 - JOB_STATE_CANCELLING = 6 - JOB_STATE_CANCELLED = 7 - JOB_STATE_PAUSED = 8 - JOB_STATE_EXPIRED = 9 - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/lineage_subgraph.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/lineage_subgraph.py deleted file mode 100644 index 9e1cf46e35..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/lineage_subgraph.py +++ /dev/null @@ -1,62 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import artifact -from google.cloud.aiplatform_v1.types import event -from google.cloud.aiplatform_v1.types import execution - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'LineageSubgraph', - }, -) - - -class LineageSubgraph(proto.Message): - r"""A subgraph of the overall lineage graph. Event edges connect - Artifact and Execution nodes. - - Attributes: - artifacts (Sequence[google.cloud.aiplatform_v1.types.Artifact]): - The Artifact nodes in the subgraph. - executions (Sequence[google.cloud.aiplatform_v1.types.Execution]): - The Execution nodes in the subgraph. - events (Sequence[google.cloud.aiplatform_v1.types.Event]): - The Event edges between Artifacts and - Executions in the subgraph. - """ - - artifacts = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=artifact.Artifact, - ) - executions = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=execution.Execution, - ) - events = proto.RepeatedField( - proto.MESSAGE, - number=3, - message=event.Event, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/machine_resources.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/machine_resources.py deleted file mode 100644 index 84799af0a2..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/machine_resources.py +++ /dev/null @@ -1,310 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import accelerator_type as gca_accelerator_type - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'MachineSpec', - 'DedicatedResources', - 'AutomaticResources', - 'BatchDedicatedResources', - 'ResourcesConsumed', - 'DiskSpec', - 'AutoscalingMetricSpec', - }, -) - - -class MachineSpec(proto.Message): - r"""Specification of a single machine. - - Attributes: - machine_type (str): - Immutable. The type of the machine. - - See the `list of machine types supported for - prediction `__ - - See the `list of machine types supported for custom - training `__. - - For - [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] - this field is optional, and the default value is - ``n1-standard-2``. For - [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob] - or as part of - [WorkerPoolSpec][google.cloud.aiplatform.v1.WorkerPoolSpec] - this field is required. - accelerator_type (google.cloud.aiplatform_v1.types.AcceleratorType): - Immutable. The type of accelerator(s) that may be attached - to the machine as per - [accelerator_count][google.cloud.aiplatform.v1.MachineSpec.accelerator_count]. - accelerator_count (int): - The number of accelerators to attach to the - machine. - """ - - machine_type = proto.Field( - proto.STRING, - number=1, - ) - accelerator_type = proto.Field( - proto.ENUM, - number=2, - enum=gca_accelerator_type.AcceleratorType, - ) - accelerator_count = proto.Field( - proto.INT32, - number=3, - ) - - -class DedicatedResources(proto.Message): - r"""A description of resources that are dedicated to a - DeployedModel, and that need a higher degree of manual - configuration. - - Attributes: - machine_spec (google.cloud.aiplatform_v1.types.MachineSpec): - Required. Immutable. The specification of a - single machine used by the prediction. 
- min_replica_count (int): - Required. Immutable. The minimum number of - machine replicas this DeployedModel will be - always deployed on. This value must be greater - than or equal to 1. - If traffic against the DeployedModel increases, - it may dynamically be deployed onto more - replicas, and as traffic decreases, some of - these extra replicas may be freed. - max_replica_count (int): - Immutable. The maximum number of replicas this DeployedModel - may be deployed on when the traffic against it increases. If - the requested value is too large, the deployment will error, - but if deployment succeeds then the ability to scale the - model to that many replicas is guaranteed (barring service - outages). If traffic against the DeployedModel increases - beyond what its replicas at maximum may handle, a portion of - the traffic will be dropped. If this value is not provided, - will use - [min_replica_count][google.cloud.aiplatform.v1.DedicatedResources.min_replica_count] - as the default value. - autoscaling_metric_specs (Sequence[google.cloud.aiplatform_v1.types.AutoscalingMetricSpec]): - Immutable. The metric specifications that overrides a - resource utilization metric (CPU utilization, accelerator's - duty cycle, and so on) target value (default to 60 if not - set). At most one entry is allowed per metric. - - If - [machine_spec.accelerator_count][google.cloud.aiplatform.v1.MachineSpec.accelerator_count] - is above 0, the autoscaling will be based on both CPU - utilization and accelerator's duty cycle metrics and scale - up when either metrics exceeds its target value while scale - down if both metrics are under their target value. The - default target value is 60 for both metrics. - - If - [machine_spec.accelerator_count][google.cloud.aiplatform.v1.MachineSpec.accelerator_count] - is 0, the autoscaling will be based on CPU utilization - metric only with default target value 60 if not explicitly - set. 
- - For example, in the case of Online Prediction, if you want - to override target CPU utilization to 80, you should set - [autoscaling_metric_specs.metric_name][google.cloud.aiplatform.v1.AutoscalingMetricSpec.metric_name] - to - ``aiplatform.googleapis.com/prediction/online/cpu/utilization`` - and - [autoscaling_metric_specs.target][google.cloud.aiplatform.v1.AutoscalingMetricSpec.target] - to ``80``. - """ - - machine_spec = proto.Field( - proto.MESSAGE, - number=1, - message='MachineSpec', - ) - min_replica_count = proto.Field( - proto.INT32, - number=2, - ) - max_replica_count = proto.Field( - proto.INT32, - number=3, - ) - autoscaling_metric_specs = proto.RepeatedField( - proto.MESSAGE, - number=4, - message='AutoscalingMetricSpec', - ) - - -class AutomaticResources(proto.Message): - r"""A description of resources that to large degree are decided - by Vertex AI, and require only a modest additional - configuration. Each Model supporting these resources documents - its specific guidelines. - - Attributes: - min_replica_count (int): - Immutable. The minimum number of replicas this DeployedModel - will be always deployed on. If traffic against it increases, - it may dynamically be deployed onto more replicas up to - [max_replica_count][google.cloud.aiplatform.v1.AutomaticResources.max_replica_count], - and as traffic decreases, some of these extra replicas may - be freed. If the requested value is too large, the - deployment will error. - max_replica_count (int): - Immutable. The maximum number of replicas - this DeployedModel may be deployed on when the - traffic against it increases. If the requested - value is too large, the deployment will error, - but if deployment succeeds then the ability to - scale the model to that many replicas is - guaranteed (barring service outages). If traffic - against the DeployedModel increases beyond what - its replicas at maximum may handle, a portion of - the traffic will be dropped. 
If this value is - not provided, a no upper bound for scaling under - heavy traffic will be assume, though Vertex AI - may be unable to scale beyond certain replica - number. - """ - - min_replica_count = proto.Field( - proto.INT32, - number=1, - ) - max_replica_count = proto.Field( - proto.INT32, - number=2, - ) - - -class BatchDedicatedResources(proto.Message): - r"""A description of resources that are used for performing batch - operations, are dedicated to a Model, and need manual - configuration. - - Attributes: - machine_spec (google.cloud.aiplatform_v1.types.MachineSpec): - Required. Immutable. The specification of a - single machine. - starting_replica_count (int): - Immutable. The number of machine replicas used at the start - of the batch operation. If not set, Vertex AI decides - starting number, not greater than - [max_replica_count][google.cloud.aiplatform.v1.BatchDedicatedResources.max_replica_count] - max_replica_count (int): - Immutable. The maximum number of machine - replicas the batch operation may be scaled to. - The default value is 10. - """ - - machine_spec = proto.Field( - proto.MESSAGE, - number=1, - message='MachineSpec', - ) - starting_replica_count = proto.Field( - proto.INT32, - number=2, - ) - max_replica_count = proto.Field( - proto.INT32, - number=3, - ) - - -class ResourcesConsumed(proto.Message): - r"""Statistics information about resource consumption. - - Attributes: - replica_hours (float): - Output only. The number of replica hours - used. Note that many replicas may run in - parallel, and additionally any given work may be - queued for some time. Therefore this value is - not strictly related to wall time. - """ - - replica_hours = proto.Field( - proto.DOUBLE, - number=1, - ) - - -class DiskSpec(proto.Message): - r"""Represents the spec of disk options. - - Attributes: - boot_disk_type (str): - Type of the boot disk (default is "pd-ssd"). 
- Valid values: "pd-ssd" (Persistent Disk Solid - State Drive) or "pd-standard" (Persistent Disk - Hard Disk Drive). - boot_disk_size_gb (int): - Size in GB of the boot disk (default is - 100GB). - """ - - boot_disk_type = proto.Field( - proto.STRING, - number=1, - ) - boot_disk_size_gb = proto.Field( - proto.INT32, - number=2, - ) - - -class AutoscalingMetricSpec(proto.Message): - r"""The metric specification that defines the target resource - utilization (CPU utilization, accelerator's duty cycle, and so - on) for calculating the desired replica count. - - Attributes: - metric_name (str): - Required. The resource metric name. Supported metrics: - - - For Online Prediction: - - ``aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle`` - - ``aiplatform.googleapis.com/prediction/online/cpu/utilization`` - target (int): - The target resource utilization in percentage - (1% - 100%) for the given metric; once the real - usage deviates from the target by a certain - percentage, the machine replicas change. The - default value is 60 (representing 60%) if not - provided. - """ - - metric_name = proto.Field( - proto.STRING, - number=1, - ) - target = proto.Field( - proto.INT32, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/manual_batch_tuning_parameters.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/manual_batch_tuning_parameters.py deleted file mode 100644 index 5d2410d3b2..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/manual_batch_tuning_parameters.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'ManualBatchTuningParameters', - }, -) - - -class ManualBatchTuningParameters(proto.Message): - r"""Manual batch tuning parameters. - - Attributes: - batch_size (int): - Immutable. The number of the records (e.g. - instances) of the operation given in each batch - to a machine replica. Machine type, and size of - a single record should be considered when - setting this parameter, higher value speeds up - the batch operation's execution, but too high - value will result in a whole batch not fitting - in a machine's memory, and the whole operation - will fail. - The default value is 4. - """ - - batch_size = proto.Field( - proto.INT32, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/metadata_schema.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/metadata_schema.py deleted file mode 100644 index 2a346c63c7..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/metadata_schema.py +++ /dev/null @@ -1,96 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'MetadataSchema', - }, -) - - -class MetadataSchema(proto.Message): - r"""Instance of a general MetadataSchema. - - Attributes: - name (str): - Output only. The resource name of the - MetadataSchema. - schema_version (str): - The version of the MetadataSchema. The version's format must - match the following regular expression: - ``^[0-9]+[.][0-9]+[.][0-9]+$``, which would allow to - order/compare different versions. Example: 1.0.0, 1.0.1, - etc. - schema (str): - Required. The raw YAML string representation of the - MetadataSchema. The combination of [MetadataSchema.version] - and the schema name given by ``title`` in - [MetadataSchema.schema] must be unique within a - MetadataStore. - - The schema is defined as an OpenAPI 3.0.2 `MetadataSchema - Object `__ - schema_type (google.cloud.aiplatform_v1.types.MetadataSchema.MetadataSchemaType): - The type of the MetadataSchema. This is a - property that identifies which metadata types - will use the MetadataSchema. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - MetadataSchema was created. 
- description (str): - Description of the Metadata Schema - """ - class MetadataSchemaType(proto.Enum): - r"""Describes the type of the MetadataSchema.""" - METADATA_SCHEMA_TYPE_UNSPECIFIED = 0 - ARTIFACT_TYPE = 1 - EXECUTION_TYPE = 2 - CONTEXT_TYPE = 3 - - name = proto.Field( - proto.STRING, - number=1, - ) - schema_version = proto.Field( - proto.STRING, - number=2, - ) - schema = proto.Field( - proto.STRING, - number=3, - ) - schema_type = proto.Field( - proto.ENUM, - number=4, - enum=MetadataSchemaType, - ) - create_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - description = proto.Field( - proto.STRING, - number=6, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/metadata_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/metadata_service.py deleted file mode 100644 index 1c5d427899..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/metadata_service.py +++ /dev/null @@ -1,1477 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import artifact as gca_artifact -from google.cloud.aiplatform_v1.types import context as gca_context -from google.cloud.aiplatform_v1.types import event -from google.cloud.aiplatform_v1.types import execution as gca_execution -from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema -from google.cloud.aiplatform_v1.types import metadata_store as gca_metadata_store -from google.cloud.aiplatform_v1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'CreateMetadataStoreRequest', - 'CreateMetadataStoreOperationMetadata', - 'GetMetadataStoreRequest', - 'ListMetadataStoresRequest', - 'ListMetadataStoresResponse', - 'DeleteMetadataStoreRequest', - 'DeleteMetadataStoreOperationMetadata', - 'CreateArtifactRequest', - 'GetArtifactRequest', - 'ListArtifactsRequest', - 'ListArtifactsResponse', - 'UpdateArtifactRequest', - 'DeleteArtifactRequest', - 'PurgeArtifactsRequest', - 'PurgeArtifactsResponse', - 'PurgeArtifactsMetadata', - 'CreateContextRequest', - 'GetContextRequest', - 'ListContextsRequest', - 'ListContextsResponse', - 'UpdateContextRequest', - 'DeleteContextRequest', - 'PurgeContextsRequest', - 'PurgeContextsResponse', - 'PurgeContextsMetadata', - 'AddContextArtifactsAndExecutionsRequest', - 'AddContextArtifactsAndExecutionsResponse', - 'AddContextChildrenRequest', - 'AddContextChildrenResponse', - 'QueryContextLineageSubgraphRequest', - 'CreateExecutionRequest', - 'GetExecutionRequest', - 'ListExecutionsRequest', - 'ListExecutionsResponse', - 'UpdateExecutionRequest', - 'DeleteExecutionRequest', - 'PurgeExecutionsRequest', - 'PurgeExecutionsResponse', - 'PurgeExecutionsMetadata', - 'AddExecutionEventsRequest', - 'AddExecutionEventsResponse', - 'QueryExecutionInputsAndOutputsRequest', - 'CreateMetadataSchemaRequest', - 'GetMetadataSchemaRequest', - 
'ListMetadataSchemasRequest', - 'ListMetadataSchemasResponse', - 'QueryArtifactLineageSubgraphRequest', - }, -) - - -class CreateMetadataStoreRequest(proto.Message): - r"""Request message for - [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1.MetadataService.CreateMetadataStore]. - - Attributes: - parent (str): - Required. The resource name of the Location where the - MetadataStore should be created. Format: - ``projects/{project}/locations/{location}/`` - metadata_store (google.cloud.aiplatform_v1.types.MetadataStore): - Required. The MetadataStore to create. - metadata_store_id (str): - The {metadatastore} portion of the resource name with the - format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - If not provided, the MetadataStore's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are ``/[a-z][0-9]-/``. Must be - unique across all MetadataStores in the parent Location. - (Otherwise the request will fail with ALREADY_EXISTS, or - PERMISSION_DENIED if the caller can't view the preexisting - MetadataStore.) - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - metadata_store = proto.Field( - proto.MESSAGE, - number=2, - message=gca_metadata_store.MetadataStore, - ) - metadata_store_id = proto.Field( - proto.STRING, - number=3, - ) - - -class CreateMetadataStoreOperationMetadata(proto.Message): - r"""Details of operations that perform - [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1.MetadataService.CreateMetadataStore]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - Operation metadata for creating a - MetadataStore. 
- """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class GetMetadataStoreRequest(proto.Message): - r"""Request message for - [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1.MetadataService.GetMetadataStore]. - - Attributes: - name (str): - Required. The resource name of the MetadataStore to - retrieve. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListMetadataStoresRequest(proto.Message): - r"""Request message for - [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1.MetadataService.ListMetadataStores]. - - Attributes: - parent (str): - Required. The Location whose MetadataStores should be - listed. Format: ``projects/{project}/locations/{location}`` - page_size (int): - The maximum number of Metadata Stores to - return. The service may return fewer. - Must be in range 1-1000, inclusive. Defaults to - 100. - page_token (str): - A page token, received from a previous - [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1.MetadataService.ListMetadataStores] - call. Provide this to retrieve the subsequent page. - - When paginating, all other provided parameters must match - the call that provided the page token. (Otherwise the - request will fail with INVALID_ARGUMENT error.) - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - - -class ListMetadataStoresResponse(proto.Message): - r"""Response message for - [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1.MetadataService.ListMetadataStores]. - - Attributes: - metadata_stores (Sequence[google.cloud.aiplatform_v1.types.MetadataStore]): - The MetadataStores found for the Location. 
- next_page_token (str): - A token, which can be sent as - [ListMetadataStoresRequest.page_token][google.cloud.aiplatform.v1.ListMetadataStoresRequest.page_token] - to retrieve the next page. If this field is not populated, - there are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - metadata_stores = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_metadata_store.MetadataStore, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteMetadataStoreRequest(proto.Message): - r"""Request message for - [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1.MetadataService.DeleteMetadataStore]. - - Attributes: - name (str): - Required. The resource name of the MetadataStore to delete. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - force (bool): - Deprecated: Field is no longer supported. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - force = proto.Field( - proto.BOOL, - number=2, - ) - - -class DeleteMetadataStoreOperationMetadata(proto.Message): - r"""Details of operations that perform - [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1.MetadataService.DeleteMetadataStore]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - Operation metadata for deleting a - MetadataStore. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class CreateArtifactRequest(proto.Message): - r"""Request message for - [MetadataService.CreateArtifact][google.cloud.aiplatform.v1.MetadataService.CreateArtifact]. - - Attributes: - parent (str): - Required. The resource name of the MetadataStore where the - Artifact should be created. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - artifact (google.cloud.aiplatform_v1.types.Artifact): - Required. The Artifact to create. 
- artifact_id (str): - The {artifact} portion of the resource name with the format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - If not provided, the Artifact's ID will be a UUID generated - by the service. Must be 4-128 characters in length. Valid - characters are ``/[a-z][0-9]-/``. Must be unique across all - Artifacts in the parent MetadataStore. (Otherwise the - request will fail with ALREADY_EXISTS, or PERMISSION_DENIED - if the caller can't view the preexisting Artifact.) - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - artifact = proto.Field( - proto.MESSAGE, - number=2, - message=gca_artifact.Artifact, - ) - artifact_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetArtifactRequest(proto.Message): - r"""Request message for - [MetadataService.GetArtifact][google.cloud.aiplatform.v1.MetadataService.GetArtifact]. - - Attributes: - name (str): - Required. The resource name of the Artifact to retrieve. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListArtifactsRequest(proto.Message): - r"""Request message for - [MetadataService.ListArtifacts][google.cloud.aiplatform.v1.MetadataService.ListArtifacts]. - - Attributes: - parent (str): - Required. The MetadataStore whose Artifacts should be - listed. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - page_size (int): - The maximum number of Artifacts to return. - The service may return fewer. Must be in range - 1-1000, inclusive. Defaults to 100. - page_token (str): - A page token, received from a previous - [MetadataService.ListArtifacts][google.cloud.aiplatform.v1.MetadataService.ListArtifacts] - call. Provide this to retrieve the subsequent page. - - When paginating, all other provided parameters must match - the call that provided the page token. 
(Otherwise the - request will fail with INVALID_ARGUMENT error.) - filter (str): - Filter specifying the boolean condition for the Artifacts to - satisfy in order to be part of the result set. The syntax to - define filter query is based on https://google.aip.dev/160. - The supported set of filters include the following: - - - **Attribute filtering**: For example: - ``display_name = "test"``. Supported fields include: - ``name``, ``display_name``, ``uri``, ``state``, - ``schema_title``, ``create_time``, and ``update_time``. - Time fields, such as ``create_time`` and ``update_time``, - require values specified in RFC-3339 format. For example: - ``create_time = "2020-11-19T11:30:00-04:00"`` - - **Metadata field**: To filter on metadata fields use - traversal operation as follows: - ``metadata..``. For example: - ``metadata.field_1.number_value = 10.0`` - - **Context based filtering**: To filter Artifacts based on - the contexts to which they belong, use the function - operator with the full resource name - ``in_context()``. For example: - ``in_context("projects//locations//metadataStores//contexts/")`` - - Each of the above supported filter types can be combined - together using logical operators (``AND`` & ``OR``). - - For example: - ``display_name = "test" AND metadata.field1.bool_value = true``. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - filter = proto.Field( - proto.STRING, - number=4, - ) - - -class ListArtifactsResponse(proto.Message): - r"""Response message for - [MetadataService.ListArtifacts][google.cloud.aiplatform.v1.MetadataService.ListArtifacts]. - - Attributes: - artifacts (Sequence[google.cloud.aiplatform_v1.types.Artifact]): - The Artifacts retrieved from the - MetadataStore. 
- next_page_token (str): - A token, which can be sent as - [ListArtifactsRequest.page_token][google.cloud.aiplatform.v1.ListArtifactsRequest.page_token] - to retrieve the next page. If this field is not populated, - there are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - artifacts = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_artifact.Artifact, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateArtifactRequest(proto.Message): - r"""Request message for - [MetadataService.UpdateArtifact][google.cloud.aiplatform.v1.MetadataService.UpdateArtifact]. - - Attributes: - artifact (google.cloud.aiplatform_v1.types.Artifact): - Required. The Artifact containing updates. The Artifact's - [Artifact.name][google.cloud.aiplatform.v1.Artifact.name] - field is used to identify the Artifact to be updated. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A FieldMask indicating which fields - should be updated. Functionality of this field - is not yet supported. - allow_missing (bool): - If set to true, and the - [Artifact][google.cloud.aiplatform.v1.Artifact] is not - found, a new [Artifact][google.cloud.aiplatform.v1.Artifact] - is created. - """ - - artifact = proto.Field( - proto.MESSAGE, - number=1, - message=gca_artifact.Artifact, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - allow_missing = proto.Field( - proto.BOOL, - number=3, - ) - - -class DeleteArtifactRequest(proto.Message): - r"""Request message for - [MetadataService.DeleteArtifact][google.cloud.aiplatform.v1.MetadataService.DeleteArtifact]. - - Attributes: - name (str): - Required. The resource name of the Artifact to delete. 
- Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - etag (str): - Optional. The etag of the Artifact to delete. If this is - provided, it must match the server's etag. Otherwise, the - request will fail with a FAILED_PRECONDITION. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - etag = proto.Field( - proto.STRING, - number=2, - ) - - -class PurgeArtifactsRequest(proto.Message): - r"""Request message for - [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts]. - - Attributes: - parent (str): - Required. The metadata store to purge Artifacts from. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - filter (str): - Required. A required filter matching the Artifacts to be - purged. E.g., ``update_time <= 2020-11-19T11:30:00-04:00``. - force (bool): - Optional. Flag to indicate to actually perform the purge. If - ``force`` is set to false, the method will return a sample - of Artifact names that would be deleted. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - force = proto.Field( - proto.BOOL, - number=3, - ) - - -class PurgeArtifactsResponse(proto.Message): - r"""Response message for - [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts]. - - Attributes: - purge_count (int): - The number of Artifacts that this request deleted (or, if - ``force`` is false, the number of Artifacts that will be - deleted). This can be an estimate. - purge_sample (Sequence[str]): - A sample of the Artifact names that will be deleted. Only - populated if ``force`` is set to false. The maximum number - of samples is 100 (it is possible to return fewer). 
- """ - - purge_count = proto.Field( - proto.INT64, - number=1, - ) - purge_sample = proto.RepeatedField( - proto.STRING, - number=2, - ) - - -class PurgeArtifactsMetadata(proto.Message): - r"""Details of operations that perform - [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - Operation metadata for purging Artifacts. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class CreateContextRequest(proto.Message): - r"""Request message for - [MetadataService.CreateContext][google.cloud.aiplatform.v1.MetadataService.CreateContext]. - - Attributes: - parent (str): - Required. The resource name of the MetadataStore where the - Context should be created. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - context (google.cloud.aiplatform_v1.types.Context): - Required. The Context to create. - context_id (str): - The {context} portion of the resource name with the format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}``. - If not provided, the Context's ID will be a UUID generated - by the service. Must be 4-128 characters in length. Valid - characters are ``/[a-z][0-9]-/``. Must be unique across all - Contexts in the parent MetadataStore. (Otherwise the request - will fail with ALREADY_EXISTS, or PERMISSION_DENIED if the - caller can't view the preexisting Context.) - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - context = proto.Field( - proto.MESSAGE, - number=2, - message=gca_context.Context, - ) - context_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetContextRequest(proto.Message): - r"""Request message for - [MetadataService.GetContext][google.cloud.aiplatform.v1.MetadataService.GetContext]. - - Attributes: - name (str): - Required. 
The resource name of the Context to retrieve. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListContextsRequest(proto.Message): - r"""Request message for - [MetadataService.ListContexts][google.cloud.aiplatform.v1.MetadataService.ListContexts] - - Attributes: - parent (str): - Required. The MetadataStore whose Contexts should be listed. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - page_size (int): - The maximum number of Contexts to return. The - service may return fewer. Must be in range - 1-1000, inclusive. Defaults to 100. - page_token (str): - A page token, received from a previous - [MetadataService.ListContexts][google.cloud.aiplatform.v1.MetadataService.ListContexts] - call. Provide this to retrieve the subsequent page. - - When paginating, all other provided parameters must match - the call that provided the page token. (Otherwise the - request will fail with INVALID_ARGUMENT error.) - filter (str): - Filter specifying the boolean condition for the Contexts to - satisfy in order to be part of the result set. The syntax to - define filter query is based on https://google.aip.dev/160. - Following are the supported set of filters: - - - **Attribute filtering**: For example: - ``display_name = "test"``. Supported fields include: - ``name``, ``display_name``, ``schema_title``, - ``create_time``, and ``update_time``. Time fields, such - as ``create_time`` and ``update_time``, require values - specified in RFC-3339 format. For example: - ``create_time = "2020-11-19T11:30:00-04:00"``. - - - **Metadata field**: To filter on metadata fields use - traversal operation as follows: - ``metadata..``. For example: - ``metadata.field_1.number_value = 10.0``. 
- - - **Parent Child filtering**: To filter Contexts based on - parent-child relationship use the HAS operator as - follows: - - :: - - parent_contexts: - "projects//locations//metadataStores//contexts/" - child_contexts: - "projects//locations//metadataStores//contexts/" - - Each of the above supported filters can be combined together - using logical operators (``AND`` & ``OR``). - - For example: - ``display_name = "test" AND metadata.field1.bool_value = true``. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - filter = proto.Field( - proto.STRING, - number=4, - ) - - -class ListContextsResponse(proto.Message): - r"""Response message for - [MetadataService.ListContexts][google.cloud.aiplatform.v1.MetadataService.ListContexts]. - - Attributes: - contexts (Sequence[google.cloud.aiplatform_v1.types.Context]): - The Contexts retrieved from the - MetadataStore. - next_page_token (str): - A token, which can be sent as - [ListContextsRequest.page_token][google.cloud.aiplatform.v1.ListContextsRequest.page_token] - to retrieve the next page. If this field is not populated, - there are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - contexts = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_context.Context, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateContextRequest(proto.Message): - r"""Request message for - [MetadataService.UpdateContext][google.cloud.aiplatform.v1.MetadataService.UpdateContext]. - - Attributes: - context (google.cloud.aiplatform_v1.types.Context): - Required. The Context containing updates. The Context's - [Context.name][google.cloud.aiplatform.v1.Context.name] - field is used to identify the Context to be updated. 
Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A FieldMask indicating which fields - should be updated. Functionality of this field - is not yet supported. - allow_missing (bool): - If set to true, and the - [Context][google.cloud.aiplatform.v1.Context] is not found, - a new [Context][google.cloud.aiplatform.v1.Context] is - created. - """ - - context = proto.Field( - proto.MESSAGE, - number=1, - message=gca_context.Context, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - allow_missing = proto.Field( - proto.BOOL, - number=3, - ) - - -class DeleteContextRequest(proto.Message): - r"""Request message for - [MetadataService.DeleteContext][google.cloud.aiplatform.v1.MetadataService.DeleteContext]. - - Attributes: - name (str): - Required. The resource name of the Context to delete. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - force (bool): - The force deletion semantics is still - undefined. Users should not use this field. - etag (str): - Optional. The etag of the Context to delete. If this is - provided, it must match the server's etag. Otherwise, the - request will fail with a FAILED_PRECONDITION. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - force = proto.Field( - proto.BOOL, - number=2, - ) - etag = proto.Field( - proto.STRING, - number=3, - ) - - -class PurgeContextsRequest(proto.Message): - r"""Request message for - [MetadataService.PurgeContexts][google.cloud.aiplatform.v1.MetadataService.PurgeContexts]. - - Attributes: - parent (str): - Required. The metadata store to purge Contexts from. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - filter (str): - Required. A required filter matching the Contexts to be - purged. 
E.g., ``update_time <= 2020-11-19T11:30:00-04:00``. - force (bool): - Optional. Flag to indicate to actually perform the purge. If - ``force`` is set to false, the method will return a sample - of Context names that would be deleted. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - force = proto.Field( - proto.BOOL, - number=3, - ) - - -class PurgeContextsResponse(proto.Message): - r"""Response message for - [MetadataService.PurgeContexts][google.cloud.aiplatform.v1.MetadataService.PurgeContexts]. - - Attributes: - purge_count (int): - The number of Contexts that this request deleted (or, if - ``force`` is false, the number of Contexts that will be - deleted). This can be an estimate. - purge_sample (Sequence[str]): - A sample of the Context names that will be deleted. Only - populated if ``force`` is set to false. The maximum number - of samples is 100 (it is possible to return fewer). - """ - - purge_count = proto.Field( - proto.INT64, - number=1, - ) - purge_sample = proto.RepeatedField( - proto.STRING, - number=2, - ) - - -class PurgeContextsMetadata(proto.Message): - r"""Details of operations that perform - [MetadataService.PurgeContexts][google.cloud.aiplatform.v1.MetadataService.PurgeContexts]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - Operation metadata for purging Contexts. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class AddContextArtifactsAndExecutionsRequest(proto.Message): - r"""Request message for - [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions]. - - Attributes: - context (str): - Required. The resource name of the Context that the - Artifacts and Executions belong to. 
Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - artifacts (Sequence[str]): - The resource names of the Artifacts to attribute to the - Context. - - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - executions (Sequence[str]): - The resource names of the Executions to associate with the - Context. - - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - """ - - context = proto.Field( - proto.STRING, - number=1, - ) - artifacts = proto.RepeatedField( - proto.STRING, - number=2, - ) - executions = proto.RepeatedField( - proto.STRING, - number=3, - ) - - -class AddContextArtifactsAndExecutionsResponse(proto.Message): - r"""Response message for - [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions]. - - """ - - -class AddContextChildrenRequest(proto.Message): - r"""Request message for - [MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren]. - - Attributes: - context (str): - Required. The resource name of the parent Context. - - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - child_contexts (Sequence[str]): - The resource names of the child Contexts. - """ - - context = proto.Field( - proto.STRING, - number=1, - ) - child_contexts = proto.RepeatedField( - proto.STRING, - number=2, - ) - - -class AddContextChildrenResponse(proto.Message): - r"""Response message for - [MetadataService.AddContextChildren][google.cloud.aiplatform.v1.MetadataService.AddContextChildren]. - - """ - - -class QueryContextLineageSubgraphRequest(proto.Message): - r"""Request message for - [MetadataService.QueryContextLineageSubgraph][google.cloud.aiplatform.v1.MetadataService.QueryContextLineageSubgraph]. - - Attributes: - context (str): - Required. 
The resource name of the Context whose Artifacts - and Executions should be retrieved as a LineageSubgraph. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - - The request may error with FAILED_PRECONDITION if the number - of Artifacts, the number of Executions, or the number of - Events that would be returned for the Context exceeds 1000. - """ - - context = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateExecutionRequest(proto.Message): - r"""Request message for - [MetadataService.CreateExecution][google.cloud.aiplatform.v1.MetadataService.CreateExecution]. - - Attributes: - parent (str): - Required. The resource name of the MetadataStore where the - Execution should be created. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - execution (google.cloud.aiplatform_v1.types.Execution): - Required. The Execution to create. - execution_id (str): - The {execution} portion of the resource name with the - format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - If not provided, the Execution's ID will be a UUID generated - by the service. Must be 4-128 characters in length. Valid - characters are ``/[a-z][0-9]-/``. Must be unique across all - Executions in the parent MetadataStore. (Otherwise the - request will fail with ALREADY_EXISTS, or PERMISSION_DENIED - if the caller can't view the preexisting Execution.) - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - execution = proto.Field( - proto.MESSAGE, - number=2, - message=gca_execution.Execution, - ) - execution_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetExecutionRequest(proto.Message): - r"""Request message for - [MetadataService.GetExecution][google.cloud.aiplatform.v1.MetadataService.GetExecution]. - - Attributes: - name (str): - Required. The resource name of the Execution to retrieve. 
- Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListExecutionsRequest(proto.Message): - r"""Request message for - [MetadataService.ListExecutions][google.cloud.aiplatform.v1.MetadataService.ListExecutions]. - - Attributes: - parent (str): - Required. The MetadataStore whose Executions should be - listed. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - page_size (int): - The maximum number of Executions to return. - The service may return fewer. Must be in range - 1-1000, inclusive. Defaults to 100. - page_token (str): - A page token, received from a previous - [MetadataService.ListExecutions][google.cloud.aiplatform.v1.MetadataService.ListExecutions] - call. Provide this to retrieve the subsequent page. - - When paginating, all other provided parameters must match - the call that provided the page token. (Otherwise the - request will fail with an INVALID_ARGUMENT error.) - filter (str): - Filter specifying the boolean condition for the Executions - to satisfy in order to be part of the result set. The syntax - to define filter query is based on - https://google.aip.dev/160. Following are the supported set - of filters: - - - **Attribute filtering**: For example: - ``display_name = "test"``. Supported fields include: - ``name``, ``display_name``, ``state``, ``schema_title``, - ``create_time``, and ``update_time``. Time fields, such - as ``create_time`` and ``update_time``, require values - specified in RFC-3339 format. For example: - ``create_time = "2020-11-19T11:30:00-04:00"``. 
- - **Metadata field**: To filter on metadata fields use - traversal operation as follows: - ``metadata..`` For example: - ``metadata.field_1.number_value = 10.0`` - - **Context based filtering**: To filter Executions based - on the contexts to which they belong use the function - operator with the full resource name: - ``in_context()``. For example: - ``in_context("projects//locations//metadataStores//contexts/")`` - - Each of the above supported filters can be combined together - using logical operators (``AND`` & ``OR``). For example: - ``display_name = "test" AND metadata.field1.bool_value = true``. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - filter = proto.Field( - proto.STRING, - number=4, - ) - - -class ListExecutionsResponse(proto.Message): - r"""Response message for - [MetadataService.ListExecutions][google.cloud.aiplatform.v1.MetadataService.ListExecutions]. - - Attributes: - executions (Sequence[google.cloud.aiplatform_v1.types.Execution]): - The Executions retrieved from the - MetadataStore. - next_page_token (str): - A token, which can be sent as - [ListExecutionsRequest.page_token][google.cloud.aiplatform.v1.ListExecutionsRequest.page_token] - to retrieve the next page. If this field is not populated, - there are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - executions = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_execution.Execution, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateExecutionRequest(proto.Message): - r"""Request message for - [MetadataService.UpdateExecution][google.cloud.aiplatform.v1.MetadataService.UpdateExecution]. - - Attributes: - execution (google.cloud.aiplatform_v1.types.Execution): - Required. The Execution containing updates. 
The Execution's - [Execution.name][google.cloud.aiplatform.v1.Execution.name] - field is used to identify the Execution to be updated. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A FieldMask indicating which fields - should be updated. Functionality of this field - is not yet supported. - allow_missing (bool): - If set to true, and the - [Execution][google.cloud.aiplatform.v1.Execution] is not - found, a new - [Execution][google.cloud.aiplatform.v1.Execution] is - created. - """ - - execution = proto.Field( - proto.MESSAGE, - number=1, - message=gca_execution.Execution, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - allow_missing = proto.Field( - proto.BOOL, - number=3, - ) - - -class DeleteExecutionRequest(proto.Message): - r"""Request message for - [MetadataService.DeleteExecution][google.cloud.aiplatform.v1.MetadataService.DeleteExecution]. - - Attributes: - name (str): - Required. The resource name of the Execution to delete. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - etag (str): - Optional. The etag of the Execution to delete. If this is - provided, it must match the server's etag. Otherwise, the - request will fail with a FAILED_PRECONDITION. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - etag = proto.Field( - proto.STRING, - number=2, - ) - - -class PurgeExecutionsRequest(proto.Message): - r"""Request message for - [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1.MetadataService.PurgeExecutions]. - - Attributes: - parent (str): - Required. The metadata store to purge Executions from. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - filter (str): - Required. A required filter matching the Executions to be - purged. 
E.g., ``update_time <= 2020-11-19T11:30:00-04:00``. - force (bool): - Optional. Flag to indicate to actually perform the purge. If - ``force`` is set to false, the method will return a sample - of Execution names that would be deleted. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - force = proto.Field( - proto.BOOL, - number=3, - ) - - -class PurgeExecutionsResponse(proto.Message): - r"""Response message for - [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1.MetadataService.PurgeExecutions]. - - Attributes: - purge_count (int): - The number of Executions that this request deleted (or, if - ``force`` is false, the number of Executions that will be - deleted). This can be an estimate. - purge_sample (Sequence[str]): - A sample of the Execution names that will be deleted. Only - populated if ``force`` is set to false. The maximum number - of samples is 100 (it is possible to return fewer). - """ - - purge_count = proto.Field( - proto.INT64, - number=1, - ) - purge_sample = proto.RepeatedField( - proto.STRING, - number=2, - ) - - -class PurgeExecutionsMetadata(proto.Message): - r"""Details of operations that perform - [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1.MetadataService.PurgeExecutions]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - Operation metadata for purging Executions. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class AddExecutionEventsRequest(proto.Message): - r"""Request message for - [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents]. - - Attributes: - execution (str): - Required. The resource name of the Execution that the Events - connect Artifacts with. 
Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - events (Sequence[google.cloud.aiplatform_v1.types.Event]): - The Events to create and add. - """ - - execution = proto.Field( - proto.STRING, - number=1, - ) - events = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=event.Event, - ) - - -class AddExecutionEventsResponse(proto.Message): - r"""Response message for - [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents]. - - """ - - -class QueryExecutionInputsAndOutputsRequest(proto.Message): - r"""Request message for - [MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1.MetadataService.QueryExecutionInputsAndOutputs]. - - Attributes: - execution (str): - Required. The resource name of the Execution whose input and - output Artifacts should be retrieved as a LineageSubgraph. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - """ - - execution = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateMetadataSchemaRequest(proto.Message): - r"""Request message for - [MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1.MetadataService.CreateMetadataSchema]. - - Attributes: - parent (str): - Required. The resource name of the MetadataStore where the - MetadataSchema should be created. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - metadata_schema (google.cloud.aiplatform_v1.types.MetadataSchema): - Required. The MetadataSchema to create. - metadata_schema_id (str): - The {metadata_schema} portion of the resource name with the - format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` - If not provided, the MetadataStore's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are ``/[a-z][0-9]-/``. 
Must be - unique across all MetadataSchemas in the parent Location. - (Otherwise the request will fail with ALREADY_EXISTS, or - PERMISSION_DENIED if the caller can't view the preexisting - MetadataSchema.) - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - metadata_schema = proto.Field( - proto.MESSAGE, - number=2, - message=gca_metadata_schema.MetadataSchema, - ) - metadata_schema_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetMetadataSchemaRequest(proto.Message): - r"""Request message for - [MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1.MetadataService.GetMetadataSchema]. - - Attributes: - name (str): - Required. The resource name of the MetadataSchema to - retrieve. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListMetadataSchemasRequest(proto.Message): - r"""Request message for - [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas]. - - Attributes: - parent (str): - Required. The MetadataStore whose MetadataSchemas should be - listed. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - page_size (int): - The maximum number of MetadataSchemas to - return. The service may return fewer. - Must be in range 1-1000, inclusive. Defaults to - 100. - page_token (str): - A page token, received from a previous - [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas] - call. Provide this to retrieve the next page. - - When paginating, all other provided parameters must match - the call that provided the page token. (Otherwise the - request will fail with INVALID_ARGUMENT error.) - filter (str): - A query to filter available MetadataSchemas - for matching results. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - filter = proto.Field( - proto.STRING, - number=4, - ) - - -class ListMetadataSchemasResponse(proto.Message): - r"""Response message for - [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas]. - - Attributes: - metadata_schemas (Sequence[google.cloud.aiplatform_v1.types.MetadataSchema]): - The MetadataSchemas found for the - MetadataStore. - next_page_token (str): - A token, which can be sent as - [ListMetadataSchemasRequest.page_token][google.cloud.aiplatform.v1.ListMetadataSchemasRequest.page_token] - to retrieve the next page. If this field is not populated, - there are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - metadata_schemas = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_metadata_schema.MetadataSchema, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class QueryArtifactLineageSubgraphRequest(proto.Message): - r"""Request message for - [MetadataService.QueryArtifactLineageSubgraph][google.cloud.aiplatform.v1.MetadataService.QueryArtifactLineageSubgraph]. - - Attributes: - artifact (str): - Required. The resource name of the Artifact whose Lineage - needs to be retrieved as a LineageSubgraph. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - - The request may error with FAILED_PRECONDITION if the number - of Artifacts, the number of Executions, or the number of - Events that would be returned for the Context exceeds 1000. - max_hops (int): - Specifies the size of the lineage graph in terms of number - of hops from the specified artifact. Negative Value: - INVALID_ARGUMENT error is returned 0: Only input artifact is - returned. 
No value: Transitive closure is performed to - return the complete graph. - filter (str): - Filter specifying the boolean condition for the Artifacts to - satisfy in order to be part of the Lineage Subgraph. The - syntax to define filter query is based on - https://google.aip.dev/160. The supported set of filters - include the following: - - - **Attribute filtering**: For example: - ``display_name = "test"`` Supported fields include: - ``name``, ``display_name``, ``uri``, ``state``, - ``schema_title``, ``create_time``, and ``update_time``. - Time fields, such as ``create_time`` and ``update_time``, - require values specified in RFC-3339 format. For example: - ``create_time = "2020-11-19T11:30:00-04:00"`` - - **Metadata field**: To filter on metadata fields use - traversal operation as follows: - ``metadata..``. For example: - ``metadata.field_1.number_value = 10.0`` - - Each of the above supported filter types can be combined - together using logical operators (``AND`` & ``OR``). - - For example: - ``display_name = "test" AND metadata.field1.bool_value = true``. - """ - - artifact = proto.Field( - proto.STRING, - number=1, - ) - max_hops = proto.Field( - proto.INT32, - number=2, - ) - filter = proto.Field( - proto.STRING, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/metadata_store.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/metadata_store.py deleted file mode 100644 index 884f4feab9..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/metadata_store.py +++ /dev/null @@ -1,100 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'MetadataStore', - }, -) - - -class MetadataStore(proto.Message): - r"""Instance of a metadata store. Contains a set of metadata that - can be queried. - - Attributes: - name (str): - Output only. The resource name of the - MetadataStore instance. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - MetadataStore was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - MetadataStore was last updated. - encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): - Customer-managed encryption key spec for a - Metadata Store. If set, this Metadata Store and - all sub-resources of this Metadata Store are - secured using this key. - description (str): - Description of the MetadataStore. - state (google.cloud.aiplatform_v1.types.MetadataStore.MetadataStoreState): - Output only. State information of the - MetadataStore. - """ - - class MetadataStoreState(proto.Message): - r"""Represents state information for a MetadataStore. - - Attributes: - disk_utilization_bytes (int): - The disk utilization of the MetadataStore in - bytes. 
- """ - - disk_utilization_bytes = proto.Field( - proto.INT64, - number=1, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - create_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=5, - message=gca_encryption_spec.EncryptionSpec, - ) - description = proto.Field( - proto.STRING, - number=6, - ) - state = proto.Field( - proto.MESSAGE, - number=7, - message=MetadataStoreState, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/migratable_resource.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/migratable_resource.py deleted file mode 100644 index fff3e80bb1..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/migratable_resource.py +++ /dev/null @@ -1,228 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'MigratableResource', - }, -) - - -class MigratableResource(proto.Message): - r"""Represents one resource that exists in automl.googleapis.com, - datalabeling.googleapis.com or ml.googleapis.com. 
- - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - ml_engine_model_version (google.cloud.aiplatform_v1.types.MigratableResource.MlEngineModelVersion): - Output only. Represents one Version in - ml.googleapis.com. - - This field is a member of `oneof`_ ``resource``. - automl_model (google.cloud.aiplatform_v1.types.MigratableResource.AutomlModel): - Output only. Represents one Model in - automl.googleapis.com. - - This field is a member of `oneof`_ ``resource``. - automl_dataset (google.cloud.aiplatform_v1.types.MigratableResource.AutomlDataset): - Output only. Represents one Dataset in - automl.googleapis.com. - - This field is a member of `oneof`_ ``resource``. - data_labeling_dataset (google.cloud.aiplatform_v1.types.MigratableResource.DataLabelingDataset): - Output only. Represents one Dataset in - datalabeling.googleapis.com. - - This field is a member of `oneof`_ ``resource``. - last_migrate_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when the last - migration attempt on this MigratableResource - started. Will not be set if there's no migration - attempt on this MigratableResource. - last_update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - MigratableResource was last updated. - """ - - class MlEngineModelVersion(proto.Message): - r"""Represents one model Version in ml.googleapis.com. - - Attributes: - endpoint (str): - The ml.googleapis.com endpoint that this model Version - currently lives in. Example values: - - - ml.googleapis.com - - us-centrall-ml.googleapis.com - - europe-west4-ml.googleapis.com - - asia-east1-ml.googleapis.com - version (str): - Full resource name of ml engine model Version. 
Format: - ``projects/{project}/models/{model}/versions/{version}``. - """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - version = proto.Field( - proto.STRING, - number=2, - ) - - class AutomlModel(proto.Message): - r"""Represents one Model in automl.googleapis.com. - - Attributes: - model (str): - Full resource name of automl Model. Format: - ``projects/{project}/locations/{location}/models/{model}``. - model_display_name (str): - The Model's display name in - automl.googleapis.com. - """ - - model = proto.Field( - proto.STRING, - number=1, - ) - model_display_name = proto.Field( - proto.STRING, - number=3, - ) - - class AutomlDataset(proto.Message): - r"""Represents one Dataset in automl.googleapis.com. - - Attributes: - dataset (str): - Full resource name of automl Dataset. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}``. - dataset_display_name (str): - The Dataset's display name in - automl.googleapis.com. - """ - - dataset = proto.Field( - proto.STRING, - number=1, - ) - dataset_display_name = proto.Field( - proto.STRING, - number=4, - ) - - class DataLabelingDataset(proto.Message): - r"""Represents one Dataset in datalabeling.googleapis.com. - - Attributes: - dataset (str): - Full resource name of data labeling Dataset. Format: - ``projects/{project}/datasets/{dataset}``. - dataset_display_name (str): - The Dataset's display name in - datalabeling.googleapis.com. - data_labeling_annotated_datasets (Sequence[google.cloud.aiplatform_v1.types.MigratableResource.DataLabelingDataset.DataLabelingAnnotatedDataset]): - The migratable AnnotatedDataset in - datalabeling.googleapis.com belongs to the data - labeling Dataset. - """ - - class DataLabelingAnnotatedDataset(proto.Message): - r"""Represents one AnnotatedDataset in - datalabeling.googleapis.com. - - Attributes: - annotated_dataset (str): - Full resource name of data labeling AnnotatedDataset. 
- Format: - ``projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}``. - annotated_dataset_display_name (str): - The AnnotatedDataset's display name in - datalabeling.googleapis.com. - """ - - annotated_dataset = proto.Field( - proto.STRING, - number=1, - ) - annotated_dataset_display_name = proto.Field( - proto.STRING, - number=3, - ) - - dataset = proto.Field( - proto.STRING, - number=1, - ) - dataset_display_name = proto.Field( - proto.STRING, - number=4, - ) - data_labeling_annotated_datasets = proto.RepeatedField( - proto.MESSAGE, - number=3, - message='MigratableResource.DataLabelingDataset.DataLabelingAnnotatedDataset', - ) - - ml_engine_model_version = proto.Field( - proto.MESSAGE, - number=1, - oneof='resource', - message=MlEngineModelVersion, - ) - automl_model = proto.Field( - proto.MESSAGE, - number=2, - oneof='resource', - message=AutomlModel, - ) - automl_dataset = proto.Field( - proto.MESSAGE, - number=3, - oneof='resource', - message=AutomlDataset, - ) - data_labeling_dataset = proto.Field( - proto.MESSAGE, - number=4, - oneof='resource', - message=DataLabelingDataset, - ) - last_migrate_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - last_update_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/migration_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/migration_service.py deleted file mode 100644 index d6e0d94970..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/migration_service.py +++ /dev/null @@ -1,479 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import migratable_resource as gca_migratable_resource -from google.cloud.aiplatform_v1.types import operation -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'SearchMigratableResourcesRequest', - 'SearchMigratableResourcesResponse', - 'BatchMigrateResourcesRequest', - 'MigrateResourceRequest', - 'BatchMigrateResourcesResponse', - 'MigrateResourceResponse', - 'BatchMigrateResourcesOperationMetadata', - }, -) - - -class SearchMigratableResourcesRequest(proto.Message): - r"""Request message for - [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources]. - - Attributes: - parent (str): - Required. The location that the migratable resources should - be searched from. It's the Vertex AI location that the - resources can be migrated to, not the resources' original - location. Format: - ``projects/{project}/locations/{location}`` - page_size (int): - The standard page size. - The default and maximum value is 100. - page_token (str): - The standard page token. - filter (str): - A filter for your search. You can use the following types of - filters: - - - Resource type filters. The following strings filter for a - specific type of - [MigratableResource][google.cloud.aiplatform.v1.MigratableResource]: - - - ``ml_engine_model_version:*`` - - ``automl_model:*`` - - ``automl_dataset:*`` - - ``data_labeling_dataset:*`` - - - "Migrated or not" filters. 
The following strings filter - for resources that either have or have not already been - migrated: - - - ``last_migrate_time:*`` filters for migrated - resources. - - ``NOT last_migrate_time:*`` filters for not yet - migrated resources. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - filter = proto.Field( - proto.STRING, - number=4, - ) - - -class SearchMigratableResourcesResponse(proto.Message): - r"""Response message for - [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources]. - - Attributes: - migratable_resources (Sequence[google.cloud.aiplatform_v1.types.MigratableResource]): - All migratable resources that can be migrated - to the location specified in the request. - next_page_token (str): - The standard next-page token. The migratable_resources may - not fill page_size in SearchMigratableResourcesRequest even - when there are subsequent pages. - """ - - @property - def raw_page(self): - return self - - migratable_resources = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_migratable_resource.MigratableResource, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class BatchMigrateResourcesRequest(proto.Message): - r"""Request message for - [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources]. - - Attributes: - parent (str): - Required. The location of the migrated resource will live - in. Format: ``projects/{project}/locations/{location}`` - migrate_resource_requests (Sequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest]): - Required. The request messages specifying the - resources to migrate. They must be in the same - location as the destination. Up to 50 resources - can be migrated in one batch. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - migrate_resource_requests = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='MigrateResourceRequest', - ) - - -class MigrateResourceRequest(proto.Message): - r"""Config of migrating one resource from automl.googleapis.com, - datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - migrate_ml_engine_model_version_config (google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateMlEngineModelVersionConfig): - Config for migrating Version in - ml.googleapis.com to Vertex AI's Model. - - This field is a member of `oneof`_ ``request``. - migrate_automl_model_config (google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateAutomlModelConfig): - Config for migrating Model in - automl.googleapis.com to Vertex AI's Model. - - This field is a member of `oneof`_ ``request``. - migrate_automl_dataset_config (google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateAutomlDatasetConfig): - Config for migrating Dataset in - automl.googleapis.com to Vertex AI's Dataset. - - This field is a member of `oneof`_ ``request``. - migrate_data_labeling_dataset_config (google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateDataLabelingDatasetConfig): - Config for migrating Dataset in - datalabeling.googleapis.com to Vertex AI's - Dataset. - - This field is a member of `oneof`_ ``request``. - """ - - class MigrateMlEngineModelVersionConfig(proto.Message): - r"""Config for migrating version in ml.googleapis.com to Vertex - AI's Model. - - Attributes: - endpoint (str): - Required. 
The ml.googleapis.com endpoint that this model - version should be migrated from. Example values: - - - ml.googleapis.com - - - us-centrall-ml.googleapis.com - - - europe-west4-ml.googleapis.com - - - asia-east1-ml.googleapis.com - model_version (str): - Required. Full resource name of ml engine model version. - Format: - ``projects/{project}/models/{model}/versions/{version}``. - model_display_name (str): - Required. Display name of the model in Vertex - AI. System will pick a display name if - unspecified. - """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - model_version = proto.Field( - proto.STRING, - number=2, - ) - model_display_name = proto.Field( - proto.STRING, - number=3, - ) - - class MigrateAutomlModelConfig(proto.Message): - r"""Config for migrating Model in automl.googleapis.com to Vertex - AI's Model. - - Attributes: - model (str): - Required. Full resource name of automl Model. Format: - ``projects/{project}/locations/{location}/models/{model}``. - model_display_name (str): - Optional. Display name of the model in Vertex - AI. System will pick a display name if - unspecified. - """ - - model = proto.Field( - proto.STRING, - number=1, - ) - model_display_name = proto.Field( - proto.STRING, - number=2, - ) - - class MigrateAutomlDatasetConfig(proto.Message): - r"""Config for migrating Dataset in automl.googleapis.com to - Vertex AI's Dataset. - - Attributes: - dataset (str): - Required. Full resource name of automl Dataset. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}``. - dataset_display_name (str): - Required. Display name of the Dataset in - Vertex AI. System will pick a display name if - unspecified. - """ - - dataset = proto.Field( - proto.STRING, - number=1, - ) - dataset_display_name = proto.Field( - proto.STRING, - number=2, - ) - - class MigrateDataLabelingDatasetConfig(proto.Message): - r"""Config for migrating Dataset in datalabeling.googleapis.com - to Vertex AI's Dataset. 
- - Attributes: - dataset (str): - Required. Full resource name of data labeling Dataset. - Format: ``projects/{project}/datasets/{dataset}``. - dataset_display_name (str): - Optional. Display name of the Dataset in - Vertex AI. System will pick a display name if - unspecified. - migrate_data_labeling_annotated_dataset_configs (Sequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest.MigrateDataLabelingDatasetConfig.MigrateDataLabelingAnnotatedDatasetConfig]): - Optional. Configs for migrating - AnnotatedDataset in datalabeling.googleapis.com - to Vertex AI's SavedQuery. The specified - AnnotatedDatasets have to belong to the - datalabeling Dataset. - """ - - class MigrateDataLabelingAnnotatedDatasetConfig(proto.Message): - r"""Config for migrating AnnotatedDataset in - datalabeling.googleapis.com to Vertex AI's SavedQuery. - - Attributes: - annotated_dataset (str): - Required. Full resource name of data labeling - AnnotatedDataset. Format: - ``projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}``. 
- """ - - annotated_dataset = proto.Field( - proto.STRING, - number=1, - ) - - dataset = proto.Field( - proto.STRING, - number=1, - ) - dataset_display_name = proto.Field( - proto.STRING, - number=2, - ) - migrate_data_labeling_annotated_dataset_configs = proto.RepeatedField( - proto.MESSAGE, - number=3, - message='MigrateResourceRequest.MigrateDataLabelingDatasetConfig.MigrateDataLabelingAnnotatedDatasetConfig', - ) - - migrate_ml_engine_model_version_config = proto.Field( - proto.MESSAGE, - number=1, - oneof='request', - message=MigrateMlEngineModelVersionConfig, - ) - migrate_automl_model_config = proto.Field( - proto.MESSAGE, - number=2, - oneof='request', - message=MigrateAutomlModelConfig, - ) - migrate_automl_dataset_config = proto.Field( - proto.MESSAGE, - number=3, - oneof='request', - message=MigrateAutomlDatasetConfig, - ) - migrate_data_labeling_dataset_config = proto.Field( - proto.MESSAGE, - number=4, - oneof='request', - message=MigrateDataLabelingDatasetConfig, - ) - - -class BatchMigrateResourcesResponse(proto.Message): - r"""Response message for - [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources]. - - Attributes: - migrate_resource_responses (Sequence[google.cloud.aiplatform_v1.types.MigrateResourceResponse]): - Successfully migrated resources. - """ - - migrate_resource_responses = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='MigrateResourceResponse', - ) - - -class MigrateResourceResponse(proto.Message): - r"""Describes a successfully migrated resource. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - dataset (str): - Migrated Dataset's resource name. 
- - This field is a member of `oneof`_ ``migrated_resource``. - model (str): - Migrated Model's resource name. - - This field is a member of `oneof`_ ``migrated_resource``. - migratable_resource (google.cloud.aiplatform_v1.types.MigratableResource): - Before migration, the identifier in - ml.googleapis.com, automl.googleapis.com or - datalabeling.googleapis.com. - """ - - dataset = proto.Field( - proto.STRING, - number=1, - oneof='migrated_resource', - ) - model = proto.Field( - proto.STRING, - number=2, - oneof='migrated_resource', - ) - migratable_resource = proto.Field( - proto.MESSAGE, - number=3, - message=gca_migratable_resource.MigratableResource, - ) - - -class BatchMigrateResourcesOperationMetadata(proto.Message): - r"""Runtime operation information for - [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The common part of the operation metadata. - partial_results (Sequence[google.cloud.aiplatform_v1.types.BatchMigrateResourcesOperationMetadata.PartialResult]): - Partial results that reflect the latest - migration operation progress. - """ - - class PartialResult(proto.Message): - r"""Represents a partial result in batch migration operation for one - [MigrateResourceRequest][google.cloud.aiplatform.v1.MigrateResourceRequest]. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - error (google.rpc.status_pb2.Status): - The error result of the migration request in - case of failure. - - This field is a member of `oneof`_ ``result``. - model (str): - Migrated model resource name. 
- - This field is a member of `oneof`_ ``result``. - dataset (str): - Migrated dataset resource name. - - This field is a member of `oneof`_ ``result``. - request (google.cloud.aiplatform_v1.types.MigrateResourceRequest): - It's the same as the value in - [MigrateResourceRequest.migrate_resource_requests][]. - """ - - error = proto.Field( - proto.MESSAGE, - number=2, - oneof='result', - message=status_pb2.Status, - ) - model = proto.Field( - proto.STRING, - number=3, - oneof='result', - ) - dataset = proto.Field( - proto.STRING, - number=4, - oneof='result', - ) - request = proto.Field( - proto.MESSAGE, - number=1, - message='MigrateResourceRequest', - ) - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - partial_results = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=PartialResult, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model.py deleted file mode 100644 index 304ce39dcf..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model.py +++ /dev/null @@ -1,754 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import deployed_model_ref -from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1.types import env_var -from google.cloud.aiplatform_v1.types import explanation -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'Model', - 'PredictSchemata', - 'ModelContainerSpec', - 'Port', - }, -) - - -class Model(proto.Message): - r"""A trained machine learning Model. - - Attributes: - name (str): - The resource name of the Model. - display_name (str): - Required. The display name of the Model. - The name can be up to 128 characters long and - can be consist of any UTF-8 characters. - description (str): - The description of the Model. - predict_schemata (google.cloud.aiplatform_v1.types.PredictSchemata): - The schemata that describe formats of the Model's - predictions and explanations as given and returned via - [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict] - and - [PredictionService.Explain][google.cloud.aiplatform.v1.PredictionService.Explain]. - metadata_schema_uri (str): - Immutable. Points to a YAML file stored on Google Cloud - Storage describing additional information about the Model, - that is specific to it. Unset if the Model does not have any - additional information. The schema is defined as an OpenAPI - 3.0.2 `Schema - Object `__. - AutoML Models always have this field populated by Vertex AI, - if no additional metadata is needed, this field is set to an - empty string. Note: The URI given on output will be - immutable and probably different, including the URI scheme, - than the one given on input. The output URI will point to a - location where the user only has a read access. - metadata (google.protobuf.struct_pb2.Value): - Immutable. 
An additional information about the Model; the - schema of the metadata can be found in - [metadata_schema][google.cloud.aiplatform.v1.Model.metadata_schema_uri]. - Unset if the Model does not have any additional information. - supported_export_formats (Sequence[google.cloud.aiplatform_v1.types.Model.ExportFormat]): - Output only. The formats in which this Model - may be exported. If empty, this Model is not - available for export. - training_pipeline (str): - Output only. The resource name of the - TrainingPipeline that uploaded this Model, if - any. - container_spec (google.cloud.aiplatform_v1.types.ModelContainerSpec): - Input only. The specification of the container that is to be - used when deploying this Model. The specification is - ingested upon - [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel], - and all binaries it contains are copied and stored - internally by Vertex AI. Not present for AutoML Models. - artifact_uri (str): - Immutable. The path to the directory - containing the Model artifact and any of its - supporting files. Not present for AutoML Models. - supported_deployment_resources_types (Sequence[google.cloud.aiplatform_v1.types.Model.DeploymentResourcesType]): - Output only. When this Model is deployed, its prediction - resources are described by the ``prediction_resources`` - field of the - [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] - object. Because not all Models support all resource - configuration types, the configuration types this Model - supports are listed here. If no configuration types are - listed, the Model cannot be deployed to an - [Endpoint][google.cloud.aiplatform.v1.Endpoint] and does not - support online predictions - ([PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict] - or - [PredictionService.Explain][google.cloud.aiplatform.v1.PredictionService.Explain]). 
- Such a Model can serve predictions by using a - [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob], - if it has at least one entry each in - [supported_input_storage_formats][google.cloud.aiplatform.v1.Model.supported_input_storage_formats] - and - [supported_output_storage_formats][google.cloud.aiplatform.v1.Model.supported_output_storage_formats]. - supported_input_storage_formats (Sequence[str]): - Output only. The formats this Model supports in - [BatchPredictionJob.input_config][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. - If - [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] - exists, the instances should be given as per that schema. - - The possible formats are: - - - ``jsonl`` The JSON Lines format, where each instance is a - single line. Uses - [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. - - - ``csv`` The CSV format, where each instance is a single - comma-separated line. The first line in the file is the - header, containing comma-separated field names. Uses - [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. - - - ``tf-record`` The TFRecord format, where each instance is - a single record in tfrecord syntax. Uses - [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. - - - ``tf-record-gzip`` Similar to ``tf-record``, but the file - is gzipped. Uses - [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source]. - - - ``bigquery`` Each instance is a single row in BigQuery. - Uses - [BigQuerySource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.bigquery_source]. - - - ``file-list`` Each line of the file is the location of an - instance to process, uses ``gcs_source`` field of the - [InputConfig][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig] - object. 
- - If this Model doesn't support any of these formats it means - it cannot be used with a - [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. - However, if it has - [supported_deployment_resources_types][google.cloud.aiplatform.v1.Model.supported_deployment_resources_types], - it could serve online predictions by using - [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict] - or - [PredictionService.Explain][google.cloud.aiplatform.v1.PredictionService.Explain]. - supported_output_storage_formats (Sequence[str]): - Output only. The formats this Model supports in - [BatchPredictionJob.output_config][google.cloud.aiplatform.v1.BatchPredictionJob.output_config]. - If both - [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] - and - [PredictSchemata.prediction_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.prediction_schema_uri] - exist, the predictions are returned together with their - instances. In other words, the prediction has the original - instance data first, followed by the actual prediction - content (as per the schema). - - The possible formats are: - - - ``jsonl`` The JSON Lines format, where each prediction is - a single line. Uses - [GcsDestination][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.gcs_destination]. - - - ``csv`` The CSV format, where each prediction is a single - comma-separated line. The first line in the file is the - header, containing comma-separated field names. Uses - [GcsDestination][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.gcs_destination]. - - - ``bigquery`` Each prediction is a single row in a - BigQuery table, uses - [BigQueryDestination][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.bigquery_destination] - . - - If this Model doesn't support any of these formats it means - it cannot be used with a - [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. 
- However, if it has - [supported_deployment_resources_types][google.cloud.aiplatform.v1.Model.supported_deployment_resources_types], - it could serve online predictions by using - [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict] - or - [PredictionService.Explain][google.cloud.aiplatform.v1.PredictionService.Explain]. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Model was - uploaded into Vertex AI. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Model was - most recently updated. - deployed_models (Sequence[google.cloud.aiplatform_v1.types.DeployedModelRef]): - Output only. The pointers to DeployedModels - created from this Model. Note that Model could - have been deployed to Endpoints in different - Locations. - explanation_spec (google.cloud.aiplatform_v1.types.ExplanationSpec): - The default explanation specification for this Model. - - The Model can be used for [requesting - explanation][PredictionService.Explain] after being - [deployed][google.cloud.aiplatform.v1.EndpointService.DeployModel] - if it is populated. The Model can be used for [batch - explanation][BatchPredictionJob.generate_explanation] if it - is populated. - - All fields of the explanation_spec can be overridden by - [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] - of - [DeployModelRequest.deployed_model][google.cloud.aiplatform.v1.DeployModelRequest.deployed_model], - or - [explanation_spec][google.cloud.aiplatform.v1.BatchPredictionJob.explanation_spec] - of - [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. 
- - If the default explanation specification is not set for this - Model, this Model can still be used for [requesting - explanation][PredictionService.Explain] by setting - [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] - of - [DeployModelRequest.deployed_model][google.cloud.aiplatform.v1.DeployModelRequest.deployed_model] - and for [batch - explanation][BatchPredictionJob.generate_explanation] by - setting - [explanation_spec][google.cloud.aiplatform.v1.BatchPredictionJob.explanation_spec] - of - [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. - etag (str): - Used to perform consistent read-modify-write - updates. If not set, a blind "overwrite" update - happens. - labels (Sequence[google.cloud.aiplatform_v1.types.Model.LabelsEntry]): - The labels with user-defined metadata to - organize your Models. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): - Customer-managed encryption key spec for a - Model. If set, this Model and all sub-resources - of this Model will be secured by this key. - """ - class DeploymentResourcesType(proto.Enum): - r"""Identifies a type of Model's prediction resources.""" - DEPLOYMENT_RESOURCES_TYPE_UNSPECIFIED = 0 - DEDICATED_RESOURCES = 1 - AUTOMATIC_RESOURCES = 2 - - class ExportFormat(proto.Message): - r"""Represents export format supported by the Model. - All formats export to Google Cloud Storage. - - Attributes: - id (str): - Output only. The ID of the export format. The possible - format IDs are: - - - ``tflite`` Used for Android mobile devices. - - - ``edgetpu-tflite`` Used for `Edge - TPU `__ devices. - - - ``tf-saved-model`` A tensorflow model in SavedModel - format. 
- - - ``tf-js`` A - `TensorFlow.js `__ model - that can be used in the browser and in Node.js using - JavaScript. - - - ``core-ml`` Used for iOS mobile devices. - - - ``custom-trained`` A Model that was uploaded or trained - by custom code. - exportable_contents (Sequence[google.cloud.aiplatform_v1.types.Model.ExportFormat.ExportableContent]): - Output only. The content of this Model that - may be exported. - """ - class ExportableContent(proto.Enum): - r"""The Model content that can be exported.""" - EXPORTABLE_CONTENT_UNSPECIFIED = 0 - ARTIFACT = 1 - IMAGE = 2 - - id = proto.Field( - proto.STRING, - number=1, - ) - exportable_contents = proto.RepeatedField( - proto.ENUM, - number=2, - enum='Model.ExportFormat.ExportableContent', - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=3, - ) - predict_schemata = proto.Field( - proto.MESSAGE, - number=4, - message='PredictSchemata', - ) - metadata_schema_uri = proto.Field( - proto.STRING, - number=5, - ) - metadata = proto.Field( - proto.MESSAGE, - number=6, - message=struct_pb2.Value, - ) - supported_export_formats = proto.RepeatedField( - proto.MESSAGE, - number=20, - message=ExportFormat, - ) - training_pipeline = proto.Field( - proto.STRING, - number=7, - ) - container_spec = proto.Field( - proto.MESSAGE, - number=9, - message='ModelContainerSpec', - ) - artifact_uri = proto.Field( - proto.STRING, - number=26, - ) - supported_deployment_resources_types = proto.RepeatedField( - proto.ENUM, - number=10, - enum=DeploymentResourcesType, - ) - supported_input_storage_formats = proto.RepeatedField( - proto.STRING, - number=11, - ) - supported_output_storage_formats = proto.RepeatedField( - proto.STRING, - number=12, - ) - create_time = proto.Field( - proto.MESSAGE, - number=13, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=14, - 
message=timestamp_pb2.Timestamp, - ) - deployed_models = proto.RepeatedField( - proto.MESSAGE, - number=15, - message=deployed_model_ref.DeployedModelRef, - ) - explanation_spec = proto.Field( - proto.MESSAGE, - number=23, - message=explanation.ExplanationSpec, - ) - etag = proto.Field( - proto.STRING, - number=16, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=17, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=24, - message=gca_encryption_spec.EncryptionSpec, - ) - - -class PredictSchemata(proto.Message): - r"""Contains the schemata used in Model's predictions and explanations - via - [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict], - [PredictionService.Explain][google.cloud.aiplatform.v1.PredictionService.Explain] - and - [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. - - Attributes: - instance_schema_uri (str): - Immutable. Points to a YAML file stored on Google Cloud - Storage describing the format of a single instance, which - are used in - [PredictRequest.instances][google.cloud.aiplatform.v1.PredictRequest.instances], - [ExplainRequest.instances][google.cloud.aiplatform.v1.ExplainRequest.instances] - and - [BatchPredictionJob.input_config][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. - The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. - AutoML Models always have this field populated by Vertex AI. - Note: The URI given on output will be immutable and probably - different, including the URI scheme, than the one given on - input. The output URI will point to a location where the - user only has a read access. - parameters_schema_uri (str): - Immutable. 
Points to a YAML file stored on Google Cloud - Storage describing the parameters of prediction and - explanation via - [PredictRequest.parameters][google.cloud.aiplatform.v1.PredictRequest.parameters], - [ExplainRequest.parameters][google.cloud.aiplatform.v1.ExplainRequest.parameters] - and - [BatchPredictionJob.model_parameters][google.cloud.aiplatform.v1.BatchPredictionJob.model_parameters]. - The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. - AutoML Models always have this field populated by Vertex AI, - if no parameters are supported, then it is set to an empty - string. Note: The URI given on output will be immutable and - probably different, including the URI scheme, than the one - given on input. The output URI will point to a location - where the user only has a read access. - prediction_schema_uri (str): - Immutable. Points to a YAML file stored on Google Cloud - Storage describing the format of a single prediction - produced by this Model, which are returned via - [PredictResponse.predictions][google.cloud.aiplatform.v1.PredictResponse.predictions], - [ExplainResponse.explanations][google.cloud.aiplatform.v1.ExplainResponse.explanations], - and - [BatchPredictionJob.output_config][google.cloud.aiplatform.v1.BatchPredictionJob.output_config]. - The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. - AutoML Models always have this field populated by Vertex AI. - Note: The URI given on output will be immutable and probably - different, including the URI scheme, than the one given on - input. The output URI will point to a location where the - user only has a read access. - """ - - instance_schema_uri = proto.Field( - proto.STRING, - number=1, - ) - parameters_schema_uri = proto.Field( - proto.STRING, - number=2, - ) - prediction_schema_uri = proto.Field( - proto.STRING, - number=3, - ) - - -class ModelContainerSpec(proto.Message): - r"""Specification of a container for serving predictions. 
Some fields in - this message correspond to fields in the `Kubernetes Container v1 - core - specification `__. - - Attributes: - image_uri (str): - Required. Immutable. URI of the Docker image to be used as - the custom container for serving predictions. This URI must - identify an image in Artifact Registry or Container - Registry. Learn more about the `container publishing - requirements `__, - including permissions requirements for the Vertex AI Service - Agent. - - The container image is ingested upon - [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel], - stored internally, and this original path is afterwards not - used. - - To learn about the requirements for the Docker image itself, - see `Custom container - requirements `__. - - You can use the URI to one of Vertex AI's `pre-built - container images for - prediction `__ - in this field. - command (Sequence[str]): - Immutable. Specifies the command that runs when the - container starts. This overrides the container's - `ENTRYPOINT `__. - Specify this field as an array of executable and arguments, - similar to a Docker ``ENTRYPOINT``'s "exec" form, not its - "shell" form. - - If you do not specify this field, then the container's - ``ENTRYPOINT`` runs, in conjunction with the - [args][google.cloud.aiplatform.v1.ModelContainerSpec.args] - field or the container's - ```CMD`` `__, - if either exists. If this field is not specified and the - container does not have an ``ENTRYPOINT``, then refer to the - Docker documentation about `how ``CMD`` and ``ENTRYPOINT`` - interact `__. - - If you specify this field, then you can also specify the - ``args`` field to provide additional arguments for this - command. However, if you specify this field, then the - container's ``CMD`` is ignored. See the `Kubernetes - documentation about how the ``command`` and ``args`` fields - interact with a container's ``ENTRYPOINT`` and - ``CMD`` `__. 
- - In this field, you can reference `environment variables set - by Vertex - AI `__ - and environment variables set in the - [env][google.cloud.aiplatform.v1.ModelContainerSpec.env] - field. You cannot reference environment variables set in the - Docker image. In order for environment variables to be - expanded, reference them by using the following syntax: - $(VARIABLE_NAME) Note that this differs from Bash variable - expansion, which does not use parentheses. If a variable - cannot be resolved, the reference in the input string is - used unchanged. To avoid variable expansion, you can escape - this syntax with ``$$``; for example: $$(VARIABLE_NAME) This - field corresponds to the ``command`` field of the Kubernetes - Containers `v1 core - API `__. - args (Sequence[str]): - Immutable. Specifies arguments for the command that runs - when the container starts. This overrides the container's - ```CMD`` `__. - Specify this field as an array of executable and arguments, - similar to a Docker ``CMD``'s "default parameters" form. - - If you don't specify this field but do specify the - [command][google.cloud.aiplatform.v1.ModelContainerSpec.command] - field, then the command from the ``command`` field runs - without any additional arguments. See the `Kubernetes - documentation about how the ``command`` and ``args`` fields - interact with a container's ``ENTRYPOINT`` and - ``CMD`` `__. - - If you don't specify this field and don't specify the - ``command`` field, then the container's - ```ENTRYPOINT`` `__ - and ``CMD`` determine what runs based on their default - behavior. See the Docker documentation about `how ``CMD`` - and ``ENTRYPOINT`` - interact `__. - - In this field, you can reference `environment variables set - by Vertex - AI `__ - and environment variables set in the - [env][google.cloud.aiplatform.v1.ModelContainerSpec.env] - field. You cannot reference environment variables set in the - Docker image. 
In order for environment variables to be - expanded, reference them by using the following syntax: - $(VARIABLE_NAME) Note that this differs from Bash variable - expansion, which does not use parentheses. If a variable - cannot be resolved, the reference in the input string is - used unchanged. To avoid variable expansion, you can escape - this syntax with ``$$``; for example: $$(VARIABLE_NAME) This - field corresponds to the ``args`` field of the Kubernetes - Containers `v1 core - API `__. - env (Sequence[google.cloud.aiplatform_v1.types.EnvVar]): - Immutable. List of environment variables to set in the - container. After the container starts running, code running - in the container can read these environment variables. - - Additionally, the - [command][google.cloud.aiplatform.v1.ModelContainerSpec.command] - and - [args][google.cloud.aiplatform.v1.ModelContainerSpec.args] - fields can reference these variables. Later entries in this - list can also reference earlier entries. For example, the - following example sets the variable ``VAR_2`` to have the - value ``foo bar``: - - .. code:: json - - [ - { - "name": "VAR_1", - "value": "foo" - }, - { - "name": "VAR_2", - "value": "$(VAR_1) bar" - } - ] - - If you switch the order of the variables in the example, - then the expansion does not occur. - - This field corresponds to the ``env`` field of the - Kubernetes Containers `v1 core - API `__. - ports (Sequence[google.cloud.aiplatform_v1.types.Port]): - Immutable. List of ports to expose from the container. - Vertex AI sends any prediction requests that it receives to - the first port on this list. Vertex AI also sends `liveness - and health - checks `__ - to this port. - - If you do not specify this field, it defaults to following - value: - - .. code:: json - - [ - { - "containerPort": 8080 - } - ] - - Vertex AI does not use ports other than the first one - listed. This field corresponds to the ``ports`` field of the - Kubernetes Containers `v1 core - API `__. 
- predict_route (str): - Immutable. HTTP path on the container to send prediction - requests to. Vertex AI forwards requests sent using - [projects.locations.endpoints.predict][google.cloud.aiplatform.v1.PredictionService.Predict] - to this path on the container's IP address and port. Vertex - AI then returns the container's response in the API - response. - - For example, if you set this field to ``/foo``, then when - Vertex AI receives a prediction request, it forwards the - request body in a POST request to the ``/foo`` path on the - port of your container specified by the first value of this - ``ModelContainerSpec``'s - [ports][google.cloud.aiplatform.v1.ModelContainerSpec.ports] - field. - - If you don't specify this field, it defaults to the - following value when you [deploy this Model to an - Endpoint][google.cloud.aiplatform.v1.EndpointService.DeployModel]: - /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict - The placeholders in this value are replaced as follows: - - - ENDPOINT: The last segment (following ``endpoints/``)of - the Endpoint.name][] field of the Endpoint where this - Model has been deployed. (Vertex AI makes this value - available to your container code as the - ```AIP_ENDPOINT_ID`` environment - variable `__.) - - - DEPLOYED_MODEL: - [DeployedModel.id][google.cloud.aiplatform.v1.DeployedModel.id] - of the ``DeployedModel``. (Vertex AI makes this value - available to your container code as the - ```AIP_DEPLOYED_MODEL_ID`` environment - variable `__.) - health_route (str): - Immutable. HTTP path on the container to send health checks - to. Vertex AI intermittently sends GET requests to this path - on the container's IP address and port to check that the - container is healthy. Read more about `health - checks `__. 
- - For example, if you set this field to ``/bar``, then Vertex - AI intermittently sends a GET request to the ``/bar`` path - on the port of your container specified by the first value - of this ``ModelContainerSpec``'s - [ports][google.cloud.aiplatform.v1.ModelContainerSpec.ports] - field. - - If you don't specify this field, it defaults to the - following value when you [deploy this Model to an - Endpoint][google.cloud.aiplatform.v1.EndpointService.DeployModel]: - /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict - The placeholders in this value are replaced as follows: - - - ENDPOINT: The last segment (following ``endpoints/``)of - the Endpoint.name][] field of the Endpoint where this - Model has been deployed. (Vertex AI makes this value - available to your container code as the - ```AIP_ENDPOINT_ID`` environment - variable `__.) - - - DEPLOYED_MODEL: - [DeployedModel.id][google.cloud.aiplatform.v1.DeployedModel.id] - of the ``DeployedModel``. (Vertex AI makes this value - available to your container code as the - ```AIP_DEPLOYED_MODEL_ID`` environment - variable `__.) - """ - - image_uri = proto.Field( - proto.STRING, - number=1, - ) - command = proto.RepeatedField( - proto.STRING, - number=2, - ) - args = proto.RepeatedField( - proto.STRING, - number=3, - ) - env = proto.RepeatedField( - proto.MESSAGE, - number=4, - message=env_var.EnvVar, - ) - ports = proto.RepeatedField( - proto.MESSAGE, - number=5, - message='Port', - ) - predict_route = proto.Field( - proto.STRING, - number=6, - ) - health_route = proto.Field( - proto.STRING, - number=7, - ) - - -class Port(proto.Message): - r"""Represents a network port in a container. - - Attributes: - container_port (int): - The number of the port to expose on the pod's - IP address. Must be a valid port number, between - 1 and 65535 inclusive. 
- """ - - container_port = proto.Field( - proto.INT32, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py deleted file mode 100644 index dc8b18d321..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_deployment_monitoring_job.py +++ /dev/null @@ -1,441 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1.types import feature_monitoring_stats -from google.cloud.aiplatform_v1.types import io -from google.cloud.aiplatform_v1.types import job_state -from google.cloud.aiplatform_v1.types import model_monitoring -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'ModelDeploymentMonitoringObjectiveType', - 'ModelDeploymentMonitoringJob', - 'ModelDeploymentMonitoringBigQueryTable', - 'ModelDeploymentMonitoringObjectiveConfig', - 'ModelDeploymentMonitoringScheduleConfig', - 'ModelMonitoringStatsAnomalies', - }, -) - - -class ModelDeploymentMonitoringObjectiveType(proto.Enum): - r"""The Model Monitoring Objective types.""" - MODEL_DEPLOYMENT_MONITORING_OBJECTIVE_TYPE_UNSPECIFIED = 0 - RAW_FEATURE_SKEW = 1 - RAW_FEATURE_DRIFT = 2 - FEATURE_ATTRIBUTION_SKEW = 3 - FEATURE_ATTRIBUTION_DRIFT = 4 - - -class ModelDeploymentMonitoringJob(proto.Message): - r"""Represents a job that runs periodically to monitor the - deployed models in an endpoint. It will analyze the logged - training & prediction data to detect any abnormal behaviors. - - Attributes: - name (str): - Output only. Resource name of a - ModelDeploymentMonitoringJob. - display_name (str): - Required. The user-defined name of the - ModelDeploymentMonitoringJob. The name can be up - to 128 characters long and can be consist of any - UTF-8 characters. - Display name of a ModelDeploymentMonitoringJob. - endpoint (str): - Required. Endpoint resource name. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - state (google.cloud.aiplatform_v1.types.JobState): - Output only. 
The detailed state of the - monitoring job. When the job is still creating, - the state will be 'PENDING'. Once the job is - successfully created, the state will be - 'RUNNING'. Pause the job, the state will be - 'PAUSED'. - Resume the job, the state will return to - 'RUNNING'. - schedule_state (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob.MonitoringScheduleState): - Output only. Schedule state when the - monitoring job is in Running state. - model_deployment_monitoring_objective_configs (Sequence[google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringObjectiveConfig]): - Required. The config for monitoring - objectives. This is a per DeployedModel config. - Each DeployedModel needs to be configured - separately. - model_deployment_monitoring_schedule_config (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringScheduleConfig): - Required. Schedule config for running the - monitoring job. - logging_sampling_strategy (google.cloud.aiplatform_v1.types.SamplingStrategy): - Required. Sample Strategy for logging. - model_monitoring_alert_config (google.cloud.aiplatform_v1.types.ModelMonitoringAlertConfig): - Alert config for model monitoring. - predict_instance_schema_uri (str): - YAML schema file uri describing the format of - a single instance, which are given to format - this Endpoint's prediction (and explanation). If - not set, we will generate predict schema from - collected predict requests. - sample_predict_instance (google.protobuf.struct_pb2.Value): - Sample Predict instance, same format as - [PredictRequest.instances][google.cloud.aiplatform.v1.PredictRequest.instances], - this can be set as a replacement of - [ModelDeploymentMonitoringJob.predict_instance_schema_uri][google.cloud.aiplatform.v1.ModelDeploymentMonitoringJob.predict_instance_schema_uri]. - If not set, we will generate predict schema from collected - predict requests. 
- analysis_instance_schema_uri (str): - YAML schema file uri describing the format of a single - instance that you want Tensorflow Data Validation (TFDV) to - analyze. - - If this field is empty, all the feature data types are - inferred from - [predict_instance_schema_uri][google.cloud.aiplatform.v1.ModelDeploymentMonitoringJob.predict_instance_schema_uri], - meaning that TFDV will use the data in the exact format(data - type) as prediction request/response. If there are any data - type differences between predict instance and TFDV instance, - this field can be used to override the schema. For models - trained with Vertex AI, this field must be set as all the - fields in predict instance formatted as string. - bigquery_tables (Sequence[google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringBigQueryTable]): - Output only. The created bigquery tables for - the job under customer project. Customer could - do their own query & analysis. There could be 4 - log tables in maximum: - 1. Training data logging predict - request/response 2. Serving data logging predict - request/response - log_ttl (google.protobuf.duration_pb2.Duration): - The TTL of BigQuery tables in user projects - which stores logs. A day is the basic unit of - the TTL and we take the ceil of TTL/86400(a - day). e.g. { second: 3600} indicates ttl = 1 - day. - labels (Sequence[google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob.LabelsEntry]): - The labels with user-defined metadata to - organize your ModelDeploymentMonitoringJob. - - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - ModelDeploymentMonitoringJob was created. 
- update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - ModelDeploymentMonitoringJob was updated most - recently. - next_schedule_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this monitoring - pipeline will be scheduled to run for the next - round. - stats_anomalies_base_directory (google.cloud.aiplatform_v1.types.GcsDestination): - Stats anomalies base folder path. - encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): - Customer-managed encryption key spec for a - ModelDeploymentMonitoringJob. If set, this - ModelDeploymentMonitoringJob and all sub- - resources of this ModelDeploymentMonitoringJob - will be secured by this key. - enable_monitoring_pipeline_logs (bool): - If true, the scheduled monitoring pipeline logs are sent to - Google Cloud Logging, including pipeline status and - anomalies detected. Please note the logs incur cost, which - are subject to `Cloud Logging - pricing `__. - error (google.rpc.status_pb2.Status): - Output only. Only populated when the job's state is - ``JOB_STATE_FAILED`` or ``JOB_STATE_CANCELLED``. 
- """ - class MonitoringScheduleState(proto.Enum): - r"""The state to Specify the monitoring pipeline.""" - MONITORING_SCHEDULE_STATE_UNSPECIFIED = 0 - PENDING = 1 - OFFLINE = 2 - RUNNING = 3 - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - endpoint = proto.Field( - proto.STRING, - number=3, - ) - state = proto.Field( - proto.ENUM, - number=4, - enum=job_state.JobState, - ) - schedule_state = proto.Field( - proto.ENUM, - number=5, - enum=MonitoringScheduleState, - ) - model_deployment_monitoring_objective_configs = proto.RepeatedField( - proto.MESSAGE, - number=6, - message='ModelDeploymentMonitoringObjectiveConfig', - ) - model_deployment_monitoring_schedule_config = proto.Field( - proto.MESSAGE, - number=7, - message='ModelDeploymentMonitoringScheduleConfig', - ) - logging_sampling_strategy = proto.Field( - proto.MESSAGE, - number=8, - message=model_monitoring.SamplingStrategy, - ) - model_monitoring_alert_config = proto.Field( - proto.MESSAGE, - number=15, - message=model_monitoring.ModelMonitoringAlertConfig, - ) - predict_instance_schema_uri = proto.Field( - proto.STRING, - number=9, - ) - sample_predict_instance = proto.Field( - proto.MESSAGE, - number=19, - message=struct_pb2.Value, - ) - analysis_instance_schema_uri = proto.Field( - proto.STRING, - number=16, - ) - bigquery_tables = proto.RepeatedField( - proto.MESSAGE, - number=10, - message='ModelDeploymentMonitoringBigQueryTable', - ) - log_ttl = proto.Field( - proto.MESSAGE, - number=17, - message=duration_pb2.Duration, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=11, - ) - create_time = proto.Field( - proto.MESSAGE, - number=12, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=13, - message=timestamp_pb2.Timestamp, - ) - next_schedule_time = proto.Field( - proto.MESSAGE, - number=14, - message=timestamp_pb2.Timestamp, - ) - stats_anomalies_base_directory = 
proto.Field( - proto.MESSAGE, - number=20, - message=io.GcsDestination, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=21, - message=gca_encryption_spec.EncryptionSpec, - ) - enable_monitoring_pipeline_logs = proto.Field( - proto.BOOL, - number=22, - ) - error = proto.Field( - proto.MESSAGE, - number=23, - message=status_pb2.Status, - ) - - -class ModelDeploymentMonitoringBigQueryTable(proto.Message): - r"""ModelDeploymentMonitoringBigQueryTable specifies the BigQuery - table name as well as some information of the logs stored in - this table. - - Attributes: - log_source (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringBigQueryTable.LogSource): - The source of log. - log_type (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringBigQueryTable.LogType): - The type of log. - bigquery_table_path (str): - The created BigQuery table to store logs. Customer could do - their own query & analysis. Format: - ``bq://.model_deployment_monitoring_._`` - """ - class LogSource(proto.Enum): - r"""Indicates where does the log come from.""" - LOG_SOURCE_UNSPECIFIED = 0 - TRAINING = 1 - SERVING = 2 - - class LogType(proto.Enum): - r"""Indicates what type of traffic does the log belong to.""" - LOG_TYPE_UNSPECIFIED = 0 - PREDICT = 1 - EXPLAIN = 2 - - log_source = proto.Field( - proto.ENUM, - number=1, - enum=LogSource, - ) - log_type = proto.Field( - proto.ENUM, - number=2, - enum=LogType, - ) - bigquery_table_path = proto.Field( - proto.STRING, - number=3, - ) - - -class ModelDeploymentMonitoringObjectiveConfig(proto.Message): - r"""ModelDeploymentMonitoringObjectiveConfig contains the pair of - deployed_model_id to ModelMonitoringObjectiveConfig. - - Attributes: - deployed_model_id (str): - The DeployedModel ID of the objective config. - objective_config (google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig): - The objective config of for the - modelmonitoring job of this deployed model. 
- """ - - deployed_model_id = proto.Field( - proto.STRING, - number=1, - ) - objective_config = proto.Field( - proto.MESSAGE, - number=2, - message=model_monitoring.ModelMonitoringObjectiveConfig, - ) - - -class ModelDeploymentMonitoringScheduleConfig(proto.Message): - r"""The config for scheduling monitoring job. - - Attributes: - monitor_interval (google.protobuf.duration_pb2.Duration): - Required. The model monitoring job running - interval. It will be rounded up to next full - hour. - """ - - monitor_interval = proto.Field( - proto.MESSAGE, - number=1, - message=duration_pb2.Duration, - ) - - -class ModelMonitoringStatsAnomalies(proto.Message): - r"""Statistics and anomalies generated by Model Monitoring. - - Attributes: - objective (google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringObjectiveType): - Model Monitoring Objective those stats and - anomalies belonging to. - deployed_model_id (str): - Deployed Model ID. - anomaly_count (int): - Number of anomalies within all stats. - feature_stats (Sequence[google.cloud.aiplatform_v1.types.ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies]): - A list of historical Stats and Anomalies - generated for all Features. - """ - - class FeatureHistoricStatsAnomalies(proto.Message): - r"""Historical Stats (and Anomalies) for a specific Feature. - - Attributes: - feature_display_name (str): - Display Name of the Feature. - threshold (google.cloud.aiplatform_v1.types.ThresholdConfig): - Threshold for anomaly detection. - training_stats (google.cloud.aiplatform_v1.types.FeatureStatsAnomaly): - Stats calculated for the Training Dataset. - prediction_stats (Sequence[google.cloud.aiplatform_v1.types.FeatureStatsAnomaly]): - A list of historical stats generated by - different time window's Prediction Dataset. 
- """ - - feature_display_name = proto.Field( - proto.STRING, - number=1, - ) - threshold = proto.Field( - proto.MESSAGE, - number=3, - message=model_monitoring.ThresholdConfig, - ) - training_stats = proto.Field( - proto.MESSAGE, - number=4, - message=feature_monitoring_stats.FeatureStatsAnomaly, - ) - prediction_stats = proto.RepeatedField( - proto.MESSAGE, - number=5, - message=feature_monitoring_stats.FeatureStatsAnomaly, - ) - - objective = proto.Field( - proto.ENUM, - number=1, - enum='ModelDeploymentMonitoringObjectiveType', - ) - deployed_model_id = proto.Field( - proto.STRING, - number=2, - ) - anomaly_count = proto.Field( - proto.INT32, - number=3, - ) - feature_stats = proto.RepeatedField( - proto.MESSAGE, - number=4, - message=FeatureHistoricStatsAnomalies, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation.py deleted file mode 100644 index ccaad6809c..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation.py +++ /dev/null @@ -1,99 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import explanation -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'ModelEvaluation', - }, -) - - -class ModelEvaluation(proto.Message): - r"""A collection of metrics calculated by comparing Model's - predictions on all of the test data against annotations from the - test data. - - Attributes: - name (str): - Output only. The resource name of the - ModelEvaluation. - metrics_schema_uri (str): - Output only. Points to a YAML file stored on Google Cloud - Storage describing the - [metrics][google.cloud.aiplatform.v1.ModelEvaluation.metrics] - of this ModelEvaluation. The schema is defined as an OpenAPI - 3.0.2 `Schema - Object `__. - metrics (google.protobuf.struct_pb2.Value): - Output only. Evaluation metrics of the Model. The schema of - the metrics is stored in - [metrics_schema_uri][google.cloud.aiplatform.v1.ModelEvaluation.metrics_schema_uri] - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - ModelEvaluation was created. - slice_dimensions (Sequence[str]): - Output only. All possible - [dimensions][ModelEvaluationSlice.slice.dimension] of - ModelEvaluationSlices. The dimensions can be used as the - filter of the - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices] - request, in the form of ``slice.dimension = ``. - model_explanation (google.cloud.aiplatform_v1.types.ModelExplanation): - Output only. Aggregated explanation metrics - for the Model's prediction output over the data - this ModelEvaluation uses. This field is - populated only if the Model is evaluated with - explanations, and only for AutoML tabular - Models. 
- """ - - name = proto.Field( - proto.STRING, - number=1, - ) - metrics_schema_uri = proto.Field( - proto.STRING, - number=2, - ) - metrics = proto.Field( - proto.MESSAGE, - number=3, - message=struct_pb2.Value, - ) - create_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - slice_dimensions = proto.RepeatedField( - proto.STRING, - number=5, - ) - model_explanation = proto.Field( - proto.MESSAGE, - number=8, - message=explanation.ModelExplanation, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation_slice.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation_slice.py deleted file mode 100644 index 9f17990f20..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_evaluation_slice.py +++ /dev/null @@ -1,110 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'ModelEvaluationSlice', - }, -) - - -class ModelEvaluationSlice(proto.Message): - r"""A collection of metrics calculated by comparing Model's - predictions on a slice of the test data against ground truth - annotations. - - Attributes: - name (str): - Output only. 
The resource name of the - ModelEvaluationSlice. - slice_ (google.cloud.aiplatform_v1.types.ModelEvaluationSlice.Slice): - Output only. The slice of the test data that - is used to evaluate the Model. - metrics_schema_uri (str): - Output only. Points to a YAML file stored on Google Cloud - Storage describing the - [metrics][google.cloud.aiplatform.v1.ModelEvaluationSlice.metrics] - of this ModelEvaluationSlice. The schema is defined as an - OpenAPI 3.0.2 `Schema - Object `__. - metrics (google.protobuf.struct_pb2.Value): - Output only. Sliced evaluation metrics of the Model. The - schema of the metrics is stored in - [metrics_schema_uri][google.cloud.aiplatform.v1.ModelEvaluationSlice.metrics_schema_uri] - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - ModelEvaluationSlice was created. - """ - - class Slice(proto.Message): - r"""Definition of a slice. - - Attributes: - dimension (str): - Output only. The dimension of the slice. Well-known - dimensions are: - - - ``annotationSpec``: This slice is on the test data that - has either ground truth or prediction with - [AnnotationSpec.display_name][google.cloud.aiplatform.v1.AnnotationSpec.display_name] - equals to - [value][google.cloud.aiplatform.v1.ModelEvaluationSlice.Slice.value]. - value (str): - Output only. The value of the dimension in - this slice. 
- """ - - dimension = proto.Field( - proto.STRING, - number=1, - ) - value = proto.Field( - proto.STRING, - number=2, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - slice_ = proto.Field( - proto.MESSAGE, - number=2, - message=Slice, - ) - metrics_schema_uri = proto.Field( - proto.STRING, - number=3, - ) - metrics = proto.Field( - proto.MESSAGE, - number=4, - message=struct_pb2.Value, - ) - create_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_monitoring.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_monitoring.py deleted file mode 100644 index 97c01d8327..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_monitoring.py +++ /dev/null @@ -1,398 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import io - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'ModelMonitoringObjectiveConfig', - 'ModelMonitoringAlertConfig', - 'ThresholdConfig', - 'SamplingStrategy', - }, -) - - -class ModelMonitoringObjectiveConfig(proto.Message): - r"""Next ID: 6 - - Attributes: - training_dataset (google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.TrainingDataset): - Training dataset for models. 
This field has - to be set only if - TrainingPredictionSkewDetectionConfig is - specified. - training_prediction_skew_detection_config (google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig): - The config for skew between training data and - prediction data. - prediction_drift_detection_config (google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig): - The config for drift of prediction data. - explanation_config (google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.ExplanationConfig): - The config for integrating with Vertex - Explainable AI. - """ - - class TrainingDataset(proto.Message): - r"""Training Dataset information. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - dataset (str): - The resource name of the Dataset used to - train this Model. - - This field is a member of `oneof`_ ``data_source``. - gcs_source (google.cloud.aiplatform_v1.types.GcsSource): - The Google Cloud Storage uri of the unmanaged - Dataset used to train this Model. - - This field is a member of `oneof`_ ``data_source``. - bigquery_source (google.cloud.aiplatform_v1.types.BigQuerySource): - The BigQuery table of the unmanaged Dataset - used to train this Model. - - This field is a member of `oneof`_ ``data_source``. - data_format (str): - Data format of the dataset, only applicable - if the input is from Google Cloud Storage. - The possible formats are: - - "tf-record" - The source file is a TFRecord file. - - "csv" - The source file is a CSV file. - target_field (str): - The target field name the model is to - predict. 
This field will be excluded when doing - Predict and (or) Explain for the training data. - logging_sampling_strategy (google.cloud.aiplatform_v1.types.SamplingStrategy): - Strategy to sample data from Training - Dataset. If not set, we process the whole - dataset. - """ - - dataset = proto.Field( - proto.STRING, - number=3, - oneof='data_source', - ) - gcs_source = proto.Field( - proto.MESSAGE, - number=4, - oneof='data_source', - message=io.GcsSource, - ) - bigquery_source = proto.Field( - proto.MESSAGE, - number=5, - oneof='data_source', - message=io.BigQuerySource, - ) - data_format = proto.Field( - proto.STRING, - number=2, - ) - target_field = proto.Field( - proto.STRING, - number=6, - ) - logging_sampling_strategy = proto.Field( - proto.MESSAGE, - number=7, - message='SamplingStrategy', - ) - - class TrainingPredictionSkewDetectionConfig(proto.Message): - r"""The config for Training & Prediction data skew detection. It - specifies the training dataset sources and the skew detection - parameters. - - Attributes: - skew_thresholds (Sequence[google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig.SkewThresholdsEntry]): - Key is the feature name and value is the - threshold. If a feature needs to be monitored - for skew, a value threshold must be configured - for that feature. The threshold here is against - feature distribution distance between the - training and prediction feature. - attribution_score_skew_thresholds (Sequence[google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig.AttributionScoreSkewThresholdsEntry]): - Key is the feature name and value is the - threshold. The threshold here is against - attribution score distance between the training - and prediction feature. 
- """ - - skew_thresholds = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=1, - message='ThresholdConfig', - ) - attribution_score_skew_thresholds = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=2, - message='ThresholdConfig', - ) - - class PredictionDriftDetectionConfig(proto.Message): - r"""The config for Prediction data drift detection. - - Attributes: - drift_thresholds (Sequence[google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig.DriftThresholdsEntry]): - Key is the feature name and value is the - threshold. If a feature needs to be monitored - for drift, a value threshold must be configured - for that feature. The threshold here is against - feature distribution distance between different - time windws. - attribution_score_drift_thresholds (Sequence[google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig.AttributionScoreDriftThresholdsEntry]): - Key is the feature name and value is the - threshold. The threshold here is against - attribution score distance between different - time windows. - """ - - drift_thresholds = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=1, - message='ThresholdConfig', - ) - attribution_score_drift_thresholds = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=2, - message='ThresholdConfig', - ) - - class ExplanationConfig(proto.Message): - r"""The config for integrating with Vertex Explainable AI. Only - applicable if the Model has explanation_spec populated. - - Attributes: - enable_feature_attributes (bool): - If want to analyze the Vertex Explainable AI - feature attribute scores or not. If set to true, - Vertex AI will log the feature attributions from - explain response and do the skew/drift detection - for them. 
- explanation_baseline (google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.ExplanationConfig.ExplanationBaseline): - Predictions generated by the - BatchPredictionJob using baseline dataset. - """ - - class ExplanationBaseline(proto.Message): - r"""Output from - [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob] - for Model Monitoring baseline dataset, which can be used to generate - baseline attribution scores. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - gcs (google.cloud.aiplatform_v1.types.GcsDestination): - Cloud Storage location for BatchExplain - output. - - This field is a member of `oneof`_ ``destination``. - bigquery (google.cloud.aiplatform_v1.types.BigQueryDestination): - BigQuery location for BatchExplain output. - - This field is a member of `oneof`_ ``destination``. - prediction_format (google.cloud.aiplatform_v1.types.ModelMonitoringObjectiveConfig.ExplanationConfig.ExplanationBaseline.PredictionFormat): - The storage format of the predictions - generated BatchPrediction job. - """ - class PredictionFormat(proto.Enum): - r"""The storage format of the predictions generated - BatchPrediction job. 
- """ - PREDICTION_FORMAT_UNSPECIFIED = 0 - JSONL = 2 - BIGQUERY = 3 - - gcs = proto.Field( - proto.MESSAGE, - number=2, - oneof='destination', - message=io.GcsDestination, - ) - bigquery = proto.Field( - proto.MESSAGE, - number=3, - oneof='destination', - message=io.BigQueryDestination, - ) - prediction_format = proto.Field( - proto.ENUM, - number=1, - enum='ModelMonitoringObjectiveConfig.ExplanationConfig.ExplanationBaseline.PredictionFormat', - ) - - enable_feature_attributes = proto.Field( - proto.BOOL, - number=1, - ) - explanation_baseline = proto.Field( - proto.MESSAGE, - number=2, - message='ModelMonitoringObjectiveConfig.ExplanationConfig.ExplanationBaseline', - ) - - training_dataset = proto.Field( - proto.MESSAGE, - number=1, - message=TrainingDataset, - ) - training_prediction_skew_detection_config = proto.Field( - proto.MESSAGE, - number=2, - message=TrainingPredictionSkewDetectionConfig, - ) - prediction_drift_detection_config = proto.Field( - proto.MESSAGE, - number=3, - message=PredictionDriftDetectionConfig, - ) - explanation_config = proto.Field( - proto.MESSAGE, - number=5, - message=ExplanationConfig, - ) - - -class ModelMonitoringAlertConfig(proto.Message): - r"""Next ID: 3 - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - email_alert_config (google.cloud.aiplatform_v1.types.ModelMonitoringAlertConfig.EmailAlertConfig): - Email alert config. - - This field is a member of `oneof`_ ``alert``. - enable_logging (bool): - Dump the anomalies to Cloud Logging. The anomalies will be - put to json payload encoded from proto - [google.cloud.aiplatform.logging.ModelMonitoringAnomaliesLogEntry][]. - This can be further sinked to Pub/Sub or any other services - supported by Cloud Logging. - """ - - class EmailAlertConfig(proto.Message): - r"""The config for email alert. - - Attributes: - user_emails (Sequence[str]): - The email addresses to send the alert. 
- """ - - user_emails = proto.RepeatedField( - proto.STRING, - number=1, - ) - - email_alert_config = proto.Field( - proto.MESSAGE, - number=1, - oneof='alert', - message=EmailAlertConfig, - ) - enable_logging = proto.Field( - proto.BOOL, - number=2, - ) - - -class ThresholdConfig(proto.Message): - r"""The config for feature monitoring threshold. - Next ID: 3 - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - value (float): - Specify a threshold value that can trigger - the alert. If this threshold config is for - feature distribution distance: 1. For - categorical feature, the distribution distance - is calculated by L-inifinity norm. - 2. For numerical feature, the distribution - distance is calculated by Jensen–Shannon - divergence. - Each feature must have a non-zero threshold if - they need to be monitored. Otherwise no alert - will be triggered for that feature. - - This field is a member of `oneof`_ ``threshold``. - """ - - value = proto.Field( - proto.DOUBLE, - number=1, - oneof='threshold', - ) - - -class SamplingStrategy(proto.Message): - r"""Sampling Strategy for logging, can be for both training and - prediction dataset. - Next ID: 2 - - Attributes: - random_sample_config (google.cloud.aiplatform_v1.types.SamplingStrategy.RandomSampleConfig): - Random sample config. Will support more - sampling strategies later. - """ - - class RandomSampleConfig(proto.Message): - r"""Requests are randomly selected. 
- - Attributes: - sample_rate (float): - Sample rate (0, 1] - """ - - sample_rate = proto.Field( - proto.DOUBLE, - number=1, - ) - - random_sample_config = proto.Field( - proto.MESSAGE, - number=1, - message=RandomSampleConfig, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_service.py deleted file mode 100644 index 138fd4c70b..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/model_service.py +++ /dev/null @@ -1,585 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import io -from google.cloud.aiplatform_v1.types import model as gca_model -from google.cloud.aiplatform_v1.types import model_evaluation -from google.cloud.aiplatform_v1.types import model_evaluation_slice -from google.cloud.aiplatform_v1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'UploadModelRequest', - 'UploadModelOperationMetadata', - 'UploadModelResponse', - 'GetModelRequest', - 'ListModelsRequest', - 'ListModelsResponse', - 'UpdateModelRequest', - 'DeleteModelRequest', - 'ExportModelRequest', - 'ExportModelOperationMetadata', - 'ExportModelResponse', - 'GetModelEvaluationRequest', - 'ListModelEvaluationsRequest', - 'ListModelEvaluationsResponse', - 'GetModelEvaluationSliceRequest', - 'ListModelEvaluationSlicesRequest', - 'ListModelEvaluationSlicesResponse', - }, -) - - -class UploadModelRequest(proto.Message): - r"""Request message for - [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel]. - - Attributes: - parent (str): - Required. The resource name of the Location into which to - upload the Model. Format: - ``projects/{project}/locations/{location}`` - model (google.cloud.aiplatform_v1.types.Model): - Required. The Model to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - model = proto.Field( - proto.MESSAGE, - number=2, - message=gca_model.Model, - ) - - -class UploadModelOperationMetadata(proto.Message): - r"""Details of - [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel] - operation. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The common part of the operation metadata. 
- """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class UploadModelResponse(proto.Message): - r"""Response message of - [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel] - operation. - - Attributes: - model (str): - The name of the uploaded Model resource. Format: - ``projects/{project}/locations/{location}/models/{model}`` - """ - - model = proto.Field( - proto.STRING, - number=1, - ) - - -class GetModelRequest(proto.Message): - r"""Request message for - [ModelService.GetModel][google.cloud.aiplatform.v1.ModelService.GetModel]. - - Attributes: - name (str): - Required. The name of the Model resource. Format: - ``projects/{project}/locations/{location}/models/{model}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListModelsRequest(proto.Message): - r"""Request message for - [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels]. - - Attributes: - parent (str): - Required. The resource name of the Location to list the - Models from. Format: - ``projects/{project}/locations/{location}`` - filter (str): - An expression for filtering the results of the request. For - field names both snake_case and camelCase are supported. - - - ``model`` supports = and !=. ``model`` represents the - Model ID, i.e. the last segment of the Model's [resource - name][google.cloud.aiplatform.v1.Model.name]. - - ``display_name`` supports = and != - - ``labels`` supports general map functions that is: - - - ``labels.key=value`` - key:value equality - - \`labels.key:\* or labels:key - key existence - - A key including a space must be quoted. - ``labels."a key"``. - - Some examples: - - - ``model=1234`` - - ``displayName="myDisplayName"`` - - ``labels.myKey="myValue"`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. 
Typically obtained via - [ListModelsResponse.next_page_token][google.cloud.aiplatform.v1.ListModelsResponse.next_page_token] - of the previous - [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - order_by (str): - A comma-separated list of fields to order by, sorted in - ascending order. Use "desc" after a field name for - descending. Supported fields: - - - ``display_name`` - - ``create_time`` - - ``update_time`` - - Example: ``display_name, create_time desc``. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - order_by = proto.Field( - proto.STRING, - number=6, - ) - - -class ListModelsResponse(proto.Message): - r"""Response message for - [ModelService.ListModels][google.cloud.aiplatform.v1.ModelService.ListModels] - - Attributes: - models (Sequence[google.cloud.aiplatform_v1.types.Model]): - List of Models in the requested page. - next_page_token (str): - A token to retrieve next page of results. Pass to - [ListModelsRequest.page_token][google.cloud.aiplatform.v1.ListModelsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - models = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_model.Model, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateModelRequest(proto.Message): - r"""Request message for - [ModelService.UpdateModel][google.cloud.aiplatform.v1.ModelService.UpdateModel]. - - Attributes: - model (google.cloud.aiplatform_v1.types.Model): - Required. The Model which replaces the - resource on the server. 
- update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. For the - ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - """ - - model = proto.Field( - proto.MESSAGE, - number=1, - message=gca_model.Model, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeleteModelRequest(proto.Message): - r"""Request message for - [ModelService.DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel]. - - Attributes: - name (str): - Required. The name of the Model resource to be deleted. - Format: - ``projects/{project}/locations/{location}/models/{model}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ExportModelRequest(proto.Message): - r"""Request message for - [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel]. - - Attributes: - name (str): - Required. The resource name of the Model to - export. - output_config (google.cloud.aiplatform_v1.types.ExportModelRequest.OutputConfig): - Required. The desired output location and - configuration. - """ - - class OutputConfig(proto.Message): - r"""Output configuration for the Model export. - - Attributes: - export_format_id (str): - The ID of the format in which the Model must be exported. - Each Model lists the [export formats it - supports][google.cloud.aiplatform.v1.Model.supported_export_formats]. - If no value is provided here, then the first from the list - of the Model's supported formats is used by default. - artifact_destination (google.cloud.aiplatform_v1.types.GcsDestination): - The Cloud Storage location where the Model artifact is to be - written to. Under the directory given as the destination a - new one with name - "``model-export--``", - where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 - format, will be created. Inside, the Model and any of its - supporting files will be written. 
This field should only be - set when the ``exportableContent`` field of the - [Model.supported_export_formats] object contains - ``ARTIFACT``. - image_destination (google.cloud.aiplatform_v1.types.ContainerRegistryDestination): - The Google Container Registry or Artifact Registry uri where - the Model container image will be copied to. This field - should only be set when the ``exportableContent`` field of - the [Model.supported_export_formats] object contains - ``IMAGE``. - """ - - export_format_id = proto.Field( - proto.STRING, - number=1, - ) - artifact_destination = proto.Field( - proto.MESSAGE, - number=3, - message=io.GcsDestination, - ) - image_destination = proto.Field( - proto.MESSAGE, - number=4, - message=io.ContainerRegistryDestination, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - output_config = proto.Field( - proto.MESSAGE, - number=2, - message=OutputConfig, - ) - - -class ExportModelOperationMetadata(proto.Message): - r"""Details of - [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel] - operation. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The common part of the operation metadata. - output_info (google.cloud.aiplatform_v1.types.ExportModelOperationMetadata.OutputInfo): - Output only. Information further describing - the output of this Model export. - """ - - class OutputInfo(proto.Message): - r"""Further describes the output of the ExportModel. Supplements - [ExportModelRequest.OutputConfig][google.cloud.aiplatform.v1.ExportModelRequest.OutputConfig]. - - Attributes: - artifact_output_uri (str): - Output only. If the Model artifact is being - exported to Google Cloud Storage this is the - full path of the directory created, into which - the Model files are being written to. - image_output_uri (str): - Output only. If the Model image is being - exported to Google Container Registry or - Artifact Registry this is the full path of the - image created. 
- """ - - artifact_output_uri = proto.Field( - proto.STRING, - number=2, - ) - image_output_uri = proto.Field( - proto.STRING, - number=3, - ) - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - output_info = proto.Field( - proto.MESSAGE, - number=2, - message=OutputInfo, - ) - - -class ExportModelResponse(proto.Message): - r"""Response message of - [ModelService.ExportModel][google.cloud.aiplatform.v1.ModelService.ExportModel] - operation. - - """ - - -class GetModelEvaluationRequest(proto.Message): - r"""Request message for - [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1.ModelService.GetModelEvaluation]. - - Attributes: - name (str): - Required. The name of the ModelEvaluation resource. Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListModelEvaluationsRequest(proto.Message): - r"""Request message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. - - Attributes: - parent (str): - Required. The resource name of the Model to list the - ModelEvaluations from. Format: - ``projects/{project}/locations/{location}/models/{model}`` - filter (str): - The standard list filter. - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListModelEvaluationsResponse.next_page_token][google.cloud.aiplatform.v1.ListModelEvaluationsResponse.next_page_token] - of the previous - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListModelEvaluationsResponse(proto.Message): - r"""Response message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1.ModelService.ListModelEvaluations]. - - Attributes: - model_evaluations (Sequence[google.cloud.aiplatform_v1.types.ModelEvaluation]): - List of ModelEvaluations in the requested - page. - next_page_token (str): - A token to retrieve next page of results. Pass to - [ListModelEvaluationsRequest.page_token][google.cloud.aiplatform.v1.ListModelEvaluationsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - model_evaluations = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=model_evaluation.ModelEvaluation, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class GetModelEvaluationSliceRequest(proto.Message): - r"""Request message for - [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1.ModelService.GetModelEvaluationSlice]. - - Attributes: - name (str): - Required. The name of the ModelEvaluationSlice resource. - Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListModelEvaluationSlicesRequest(proto.Message): - r"""Request message for - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. - - Attributes: - parent (str): - Required. The resource name of the ModelEvaluation to list - the ModelEvaluationSlices from. 
Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` - filter (str): - The standard list filter. - - - ``slice.dimension`` - for =. - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListModelEvaluationSlicesResponse.next_page_token][google.cloud.aiplatform.v1.ListModelEvaluationSlicesResponse.next_page_token] - of the previous - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListModelEvaluationSlicesResponse(proto.Message): - r"""Response message for - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices]. - - Attributes: - model_evaluation_slices (Sequence[google.cloud.aiplatform_v1.types.ModelEvaluationSlice]): - List of ModelEvaluations in the requested - page. - next_page_token (str): - A token to retrieve next page of results. Pass to - [ListModelEvaluationSlicesRequest.page_token][google.cloud.aiplatform.v1.ListModelEvaluationSlicesRequest.page_token] - to obtain that page. 
- """ - - @property - def raw_page(self): - return self - - model_evaluation_slices = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=model_evaluation_slice.ModelEvaluationSlice, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/operation.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/operation.py deleted file mode 100644 index 12470f2164..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/operation.py +++ /dev/null @@ -1,83 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'GenericOperationMetadata', - 'DeleteOperationMetadata', - }, -) - - -class GenericOperationMetadata(proto.Message): - r"""Generic Metadata shared by all operations. - - Attributes: - partial_failures (Sequence[google.rpc.status_pb2.Status]): - Output only. Partial failures encountered. - E.g. single files that couldn't be read. - This field should never exceed 20 entries. - Status details field will contain standard GCP - error details. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. 
Time when the operation was - created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the operation was - updated for the last time. If the operation has - finished (successfully or not), this is the - finish time. - """ - - partial_failures = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=status_pb2.Status, - ) - create_time = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -class DeleteOperationMetadata(proto.Message): - r"""Details of operations that perform deletes of any entities. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The common part of the operation metadata. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message='GenericOperationMetadata', - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_job.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_job.py deleted file mode 100644 index fb1d135d5f..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_job.py +++ /dev/null @@ -1,518 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import artifact -from google.cloud.aiplatform_v1.types import context -from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1.types import execution as gca_execution -from google.cloud.aiplatform_v1.types import pipeline_state -from google.cloud.aiplatform_v1.types import value as gca_value -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'PipelineJob', - 'PipelineJobDetail', - 'PipelineTaskDetail', - 'PipelineTaskExecutorDetail', - }, -) - - -class PipelineJob(proto.Message): - r"""An instance of a machine learning PipelineJob. - - Attributes: - name (str): - Output only. The resource name of the - PipelineJob. - display_name (str): - The display name of the Pipeline. - The name can be up to 128 characters long and - can be consist of any UTF-8 characters. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Pipeline creation time. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Pipeline start time. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Pipeline end time. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this PipelineJob - was most recently updated. - pipeline_spec (google.protobuf.struct_pb2.Struct): - Required. The spec of the pipeline. - state (google.cloud.aiplatform_v1.types.PipelineState): - Output only. The detailed state of the job. - job_detail (google.cloud.aiplatform_v1.types.PipelineJobDetail): - Output only. The details of pipeline run. Not - available in the list view. - error (google.rpc.status_pb2.Status): - Output only. The error that occurred during - pipeline execution. 
Only populated when the - pipeline's state is FAILED or CANCELLED. - labels (Sequence[google.cloud.aiplatform_v1.types.PipelineJob.LabelsEntry]): - The labels with user-defined metadata to - organize PipelineJob. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - runtime_config (google.cloud.aiplatform_v1.types.PipelineJob.RuntimeConfig): - Runtime config of the pipeline. - encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): - Customer-managed encryption key spec for a - pipelineJob. If set, this PipelineJob and all of - its sub-resources will be secured by this key. - service_account (str): - The service account that the pipeline workload runs as. If - not specified, the Compute Engine default service account in - the project will be used. See - https://cloud.google.com/compute/docs/access/service-accounts#default_service_account - - Users starting the pipeline must have the - ``iam.serviceAccounts.actAs`` permission on this service - account. - network (str): - The full name of the Compute Engine - `network `__ - to which the Pipeline Job's workload should be peered. For - example, ``projects/12345/global/networks/myVPC``. - `Format `__ - is of the form - ``projects/{project}/global/networks/{network}``. Where - {project} is a project number, as in ``12345``, and - {network} is a network name. - - Private services access must already be configured for the - network. Pipeline job will apply the network configuration - to the GCP resources being launched, if applied, such as - Vertex AI Training or Dataflow job. If left unspecified, the - workload is not peered with any network. - """ - - class RuntimeConfig(proto.Message): - r"""The runtime config of a PipelineJob. 
- - Attributes: - parameters (Sequence[google.cloud.aiplatform_v1.types.PipelineJob.RuntimeConfig.ParametersEntry]): - Deprecated. Use - [RuntimeConfig.parameter_values][google.cloud.aiplatform.v1.PipelineJob.RuntimeConfig.parameter_values] - instead. The runtime parameters of the PipelineJob. The - parameters will be passed into - [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1.PipelineJob.pipeline_spec] - to replace the placeholders at runtime. This field is used - by pipelines built using - ``PipelineJob.pipeline_spec.schema_version`` 2.0.0 or lower, - such as pipelines built using Kubeflow Pipelines SDK 1.8 or - lower. - gcs_output_directory (str): - Required. A path in a Cloud Storage bucket, which will be - treated as the root output directory of the pipeline. It is - used by the system to generate the paths of output - artifacts. The artifact paths are generated with a sub-path - pattern ``{job_id}/{task_id}/{output_key}`` under the - specified output directory. The service account specified in - this pipeline must have the ``storage.objects.get`` and - ``storage.objects.create`` permissions for this bucket. - parameter_values (Sequence[google.cloud.aiplatform_v1.types.PipelineJob.RuntimeConfig.ParameterValuesEntry]): - The runtime parameters of the PipelineJob. The parameters - will be passed into - [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1.PipelineJob.pipeline_spec] - to replace the placeholders at runtime. This field is used - by pipelines built using - ``PipelineJob.pipeline_spec.schema_version`` 2.1.0, such as - pipelines built using Kubeflow Pipelines SDK 1.9 or higher - and the v2 DSL. 
- """ - - parameters = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=1, - message=gca_value.Value, - ) - gcs_output_directory = proto.Field( - proto.STRING, - number=2, - ) - parameter_values = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=3, - message=struct_pb2.Value, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - create_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - start_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - pipeline_spec = proto.Field( - proto.MESSAGE, - number=7, - message=struct_pb2.Struct, - ) - state = proto.Field( - proto.ENUM, - number=8, - enum=pipeline_state.PipelineState, - ) - job_detail = proto.Field( - proto.MESSAGE, - number=9, - message='PipelineJobDetail', - ) - error = proto.Field( - proto.MESSAGE, - number=10, - message=status_pb2.Status, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=11, - ) - runtime_config = proto.Field( - proto.MESSAGE, - number=12, - message=RuntimeConfig, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=16, - message=gca_encryption_spec.EncryptionSpec, - ) - service_account = proto.Field( - proto.STRING, - number=17, - ) - network = proto.Field( - proto.STRING, - number=18, - ) - - -class PipelineJobDetail(proto.Message): - r"""The runtime detail of PipelineJob. - - Attributes: - pipeline_context (google.cloud.aiplatform_v1.types.Context): - Output only. The context of the pipeline. - pipeline_run_context (google.cloud.aiplatform_v1.types.Context): - Output only. The context of the current - pipeline run. - task_details (Sequence[google.cloud.aiplatform_v1.types.PipelineTaskDetail]): - Output only. 
The runtime details of the tasks - under the pipeline. - """ - - pipeline_context = proto.Field( - proto.MESSAGE, - number=1, - message=context.Context, - ) - pipeline_run_context = proto.Field( - proto.MESSAGE, - number=2, - message=context.Context, - ) - task_details = proto.RepeatedField( - proto.MESSAGE, - number=3, - message='PipelineTaskDetail', - ) - - -class PipelineTaskDetail(proto.Message): - r"""The runtime detail of a task execution. - - Attributes: - task_id (int): - Output only. The system generated ID of the - task. - parent_task_id (int): - Output only. The id of the parent task if the - task is within a component scope. Empty if the - task is at the root level. - task_name (str): - Output only. The user specified name of the task that is - defined in [PipelineJob.spec][]. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Task create time. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Task start time. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Task end time. - executor_detail (google.cloud.aiplatform_v1.types.PipelineTaskExecutorDetail): - Output only. The detailed execution info. - state (google.cloud.aiplatform_v1.types.PipelineTaskDetail.State): - Output only. State of the task. - execution (google.cloud.aiplatform_v1.types.Execution): - Output only. The execution metadata of the - task. - error (google.rpc.status_pb2.Status): - Output only. The error that occurred during - task execution. Only populated when the task's - state is FAILED or CANCELLED. - pipeline_task_status (Sequence[google.cloud.aiplatform_v1.types.PipelineTaskDetail.PipelineTaskStatus]): - Output only. A list of task status. This - field keeps a record of task status evolving - over time. - inputs (Sequence[google.cloud.aiplatform_v1.types.PipelineTaskDetail.InputsEntry]): - Output only. The runtime input artifacts of - the task. 
- outputs (Sequence[google.cloud.aiplatform_v1.types.PipelineTaskDetail.OutputsEntry]): - Output only. The runtime output artifacts of - the task. - """ - class State(proto.Enum): - r"""Specifies state of TaskExecution""" - STATE_UNSPECIFIED = 0 - PENDING = 1 - RUNNING = 2 - SUCCEEDED = 3 - CANCEL_PENDING = 4 - CANCELLING = 5 - CANCELLED = 6 - FAILED = 7 - SKIPPED = 8 - NOT_TRIGGERED = 9 - - class PipelineTaskStatus(proto.Message): - r"""A single record of the task status. - - Attributes: - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Update time of this status. - state (google.cloud.aiplatform_v1.types.PipelineTaskDetail.State): - Output only. The state of the task. - error (google.rpc.status_pb2.Status): - Output only. The error that occurred during - the state. May be set when the state is any of - the non-final state (PENDING/RUNNING/CANCELLING) - or FAILED state. If the state is FAILED, the - error here is final and not going to be retried. - If the state is a non-final state, the error - indicates a system-error being retried. - """ - - update_time = proto.Field( - proto.MESSAGE, - number=1, - message=timestamp_pb2.Timestamp, - ) - state = proto.Field( - proto.ENUM, - number=2, - enum='PipelineTaskDetail.State', - ) - error = proto.Field( - proto.MESSAGE, - number=3, - message=status_pb2.Status, - ) - - class ArtifactList(proto.Message): - r"""A list of artifact metadata. - - Attributes: - artifacts (Sequence[google.cloud.aiplatform_v1.types.Artifact]): - Output only. A list of artifact metadata. 
- """ - - artifacts = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=artifact.Artifact, - ) - - task_id = proto.Field( - proto.INT64, - number=1, - ) - parent_task_id = proto.Field( - proto.INT64, - number=12, - ) - task_name = proto.Field( - proto.STRING, - number=2, - ) - create_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - start_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - executor_detail = proto.Field( - proto.MESSAGE, - number=6, - message='PipelineTaskExecutorDetail', - ) - state = proto.Field( - proto.ENUM, - number=7, - enum=State, - ) - execution = proto.Field( - proto.MESSAGE, - number=8, - message=gca_execution.Execution, - ) - error = proto.Field( - proto.MESSAGE, - number=9, - message=status_pb2.Status, - ) - pipeline_task_status = proto.RepeatedField( - proto.MESSAGE, - number=13, - message=PipelineTaskStatus, - ) - inputs = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=10, - message=ArtifactList, - ) - outputs = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=11, - message=ArtifactList, - ) - - -class PipelineTaskExecutorDetail(proto.Message): - r"""The runtime detail of a pipeline executor. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - container_detail (google.cloud.aiplatform_v1.types.PipelineTaskExecutorDetail.ContainerDetail): - Output only. The detailed info for a - container executor. - - This field is a member of `oneof`_ ``details``. 
- custom_job_detail (google.cloud.aiplatform_v1.types.PipelineTaskExecutorDetail.CustomJobDetail): - Output only. The detailed info for a custom - job executor. - - This field is a member of `oneof`_ ``details``. - """ - - class ContainerDetail(proto.Message): - r"""The detail of a container execution. It contains the job - names of the lifecycle of a container execution. - - Attributes: - main_job (str): - Output only. The name of the - [CustomJob][google.cloud.aiplatform.v1.CustomJob] for the - main container execution. - pre_caching_check_job (str): - Output only. The name of the - [CustomJob][google.cloud.aiplatform.v1.CustomJob] for the - pre-caching-check container execution. This job will be - available if the - [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1.PipelineJob.pipeline_spec] - specifies the ``pre_caching_check`` hook in the lifecycle - events. - """ - - main_job = proto.Field( - proto.STRING, - number=1, - ) - pre_caching_check_job = proto.Field( - proto.STRING, - number=2, - ) - - class CustomJobDetail(proto.Message): - r"""The detailed info for a custom job executor. - - Attributes: - job (str): - Output only. The name of the - [CustomJob][google.cloud.aiplatform.v1.CustomJob]. 
- """ - - job = proto.Field( - proto.STRING, - number=1, - ) - - container_detail = proto.Field( - proto.MESSAGE, - number=1, - oneof='details', - message=ContainerDetail, - ) - custom_job_detail = proto.Field( - proto.MESSAGE, - number=2, - oneof='details', - message=CustomJobDetail, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_service.py deleted file mode 100644 index 2b6487db0a..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_service.py +++ /dev/null @@ -1,412 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job -from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'CreateTrainingPipelineRequest', - 'GetTrainingPipelineRequest', - 'ListTrainingPipelinesRequest', - 'ListTrainingPipelinesResponse', - 'DeleteTrainingPipelineRequest', - 'CancelTrainingPipelineRequest', - 'CreatePipelineJobRequest', - 'GetPipelineJobRequest', - 'ListPipelineJobsRequest', - 'ListPipelineJobsResponse', - 'DeletePipelineJobRequest', - 'CancelPipelineJobRequest', - }, -) - - -class CreateTrainingPipelineRequest(proto.Message): - r"""Request message for - [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CreateTrainingPipeline]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - TrainingPipeline in. Format: - ``projects/{project}/locations/{location}`` - training_pipeline (google.cloud.aiplatform_v1.types.TrainingPipeline): - Required. The TrainingPipeline to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - training_pipeline = proto.Field( - proto.MESSAGE, - number=2, - message=gca_training_pipeline.TrainingPipeline, - ) - - -class GetTrainingPipelineRequest(proto.Message): - r"""Request message for - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline]. - - Attributes: - name (str): - Required. The name of the TrainingPipeline resource. 
Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListTrainingPipelinesRequest(proto.Message): - r"""Request message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines]. - - Attributes: - parent (str): - Required. The resource name of the Location to list the - TrainingPipelines from. Format: - ``projects/{project}/locations/{location}`` - filter (str): - The standard list filter. Supported fields: - - - ``display_name`` supports = and !=. - - - ``state`` supports = and !=. - - Some examples of using the filter are: - - - ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"`` - - - ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"`` - - - ``NOT display_name="my_pipeline"`` - - - ``state="PIPELINE_STATE_FAILED"`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListTrainingPipelinesResponse.next_page_token][google.cloud.aiplatform.v1.ListTrainingPipelinesResponse.next_page_token] - of the previous - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListTrainingPipelinesResponse(proto.Message): - r"""Response message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines] - - Attributes: - training_pipelines (Sequence[google.cloud.aiplatform_v1.types.TrainingPipeline]): - List of TrainingPipelines in the requested - page. - next_page_token (str): - A token to retrieve the next page of results. Pass to - [ListTrainingPipelinesRequest.page_token][google.cloud.aiplatform.v1.ListTrainingPipelinesRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - training_pipelines = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_training_pipeline.TrainingPipeline, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteTrainingPipelineRequest(proto.Message): - r"""Request message for - [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.DeleteTrainingPipeline]. - - Attributes: - name (str): - Required. The name of the TrainingPipeline resource to be - deleted. Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CancelTrainingPipelineRequest(proto.Message): - r"""Request message for - [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CancelTrainingPipeline]. - - Attributes: - name (str): - Required. The name of the TrainingPipeline to cancel. 
- Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CreatePipelineJobRequest(proto.Message): - r"""Request message for - [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1.PipelineService.CreatePipelineJob]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - PipelineJob in. Format: - ``projects/{project}/locations/{location}`` - pipeline_job (google.cloud.aiplatform_v1.types.PipelineJob): - Required. The PipelineJob to create. - pipeline_job_id (str): - The ID to use for the PipelineJob, which will become the - final component of the PipelineJob name. If not provided, an - ID will be automatically generated. - - This value should be less than 128 characters, and valid - characters are /[a-z][0-9]-/. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - pipeline_job = proto.Field( - proto.MESSAGE, - number=2, - message=gca_pipeline_job.PipelineJob, - ) - pipeline_job_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetPipelineJobRequest(proto.Message): - r"""Request message for - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob]. - - Attributes: - name (str): - Required. The name of the PipelineJob resource. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListPipelineJobsRequest(proto.Message): - r"""Request message for - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs]. - - Attributes: - parent (str): - Required. The resource name of the Location to list the - PipelineJobs from. Format: - ``projects/{project}/locations/{location}`` - filter (str): - Lists the PipelineJobs that match the filter expression. 
The - following fields are supported: - - - ``pipeline_name``: Supports ``=`` and ``!=`` comparisons. - - ``display_name``: Supports ``=``, ``!=`` comparisons, and - ``:`` wildcard. - - ``pipeline_job_user_id``: Supports ``=``, ``!=`` - comparisons, and ``:`` wildcard. for example, can check - if pipeline's display_name contains *step* by doing - display_name:"*step*" - - ``state``: Supports ``=`` and ``!=`` comparisons. - - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``end_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``labels``: Supports key-value equality and key presence. - - Filter expressions can be combined together using logical - operators (``AND`` & ``OR``). For example: - ``pipeline_name="test" AND create_time>"2020-05-18T13:30:00Z"``. - - The syntax to define filter expression is based on - https://google.aip.dev/160. - - Examples: - - - ``create_time>"2021-05-18T00:00:00Z" OR update_time>"2020-05-18T00:00:00Z"`` - PipelineJobs created or updated after 2020-05-18 00:00:00 - UTC. - - ``labels.env = "prod"`` PipelineJobs with label "env" set - to "prod". - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListPipelineJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListPipelineJobsResponse.next_page_token] - of the previous - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs] - call. - order_by (str): - A comma-separated list of fields to order by. The default - sort order is in ascending order. Use "desc" after a field - name for descending. You can have multiple order_by fields - provided e.g. 
"create_time desc, end_time", "end_time, - start_time, update_time" For example, using "create_time - desc, end_time" will order results by create time in - descending order, and if there are multiple jobs having the - same create time, order them by the end time in ascending - order. if order_by is not specified, it will order by - default order is create time in descending order. Supported - fields: - - - ``create_time`` - - ``update_time`` - - ``end_time`` - - ``start_time`` - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - order_by = proto.Field( - proto.STRING, - number=6, - ) - - -class ListPipelineJobsResponse(proto.Message): - r"""Response message for - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs] - - Attributes: - pipeline_jobs (Sequence[google.cloud.aiplatform_v1.types.PipelineJob]): - List of PipelineJobs in the requested page. - next_page_token (str): - A token to retrieve the next page of results. Pass to - [ListPipelineJobsRequest.page_token][google.cloud.aiplatform.v1.ListPipelineJobsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - pipeline_jobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_pipeline_job.PipelineJob, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeletePipelineJobRequest(proto.Message): - r"""Request message for - [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1.PipelineService.DeletePipelineJob]. - - Attributes: - name (str): - Required. The name of the PipelineJob resource to be - deleted. 
Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CancelPipelineJobRequest(proto.Message): - r"""Request message for - [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1.PipelineService.CancelPipelineJob]. - - Attributes: - name (str): - Required. The name of the PipelineJob to cancel. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_state.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_state.py deleted file mode 100644 index 0b41968239..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/pipeline_state.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'PipelineState', - }, -) - - -class PipelineState(proto.Enum): - r"""Describes the state of a pipeline.""" - PIPELINE_STATE_UNSPECIFIED = 0 - PIPELINE_STATE_QUEUED = 1 - PIPELINE_STATE_PENDING = 2 - PIPELINE_STATE_RUNNING = 3 - PIPELINE_STATE_SUCCEEDED = 4 - PIPELINE_STATE_FAILED = 5 - PIPELINE_STATE_CANCELLING = 6 - PIPELINE_STATE_CANCELLED = 7 - PIPELINE_STATE_PAUSED = 8 - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/prediction_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/prediction_service.py deleted file mode 100644 index 517147c345..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/prediction_service.py +++ /dev/null @@ -1,273 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.api import httpbody_pb2 # type: ignore -from google.cloud.aiplatform_v1.types import explanation -from google.protobuf import struct_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'PredictRequest', - 'PredictResponse', - 'RawPredictRequest', - 'ExplainRequest', - 'ExplainResponse', - }, -) - - -class PredictRequest(proto.Message): - r"""Request message for - [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. - - Attributes: - endpoint (str): - Required. The name of the Endpoint requested to serve the - prediction. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - instances (Sequence[google.protobuf.struct_pb2.Value]): - Required. The instances that are the input to the prediction - call. A DeployedModel may have an upper limit on the number - of instances it supports per request, and when it is - exceeded the prediction call errors in case of AutoML - Models, or, in case of customer created Models, the - behaviour is as documented by that Model. The schema of any - single instance may be specified via Endpoint's - DeployedModels' - [Model's][google.cloud.aiplatform.v1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri]. - parameters (google.protobuf.struct_pb2.Value): - The parameters that govern the prediction. The schema of the - parameters may be specified via Endpoint's DeployedModels' - [Model's ][google.cloud.aiplatform.v1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. 
- """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - instances = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=struct_pb2.Value, - ) - parameters = proto.Field( - proto.MESSAGE, - number=3, - message=struct_pb2.Value, - ) - - -class PredictResponse(proto.Message): - r"""Response message for - [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict]. - - Attributes: - predictions (Sequence[google.protobuf.struct_pb2.Value]): - The predictions that are the output of the predictions call. - The schema of any single prediction may be specified via - Endpoint's DeployedModels' [Model's - ][google.cloud.aiplatform.v1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [prediction_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.prediction_schema_uri]. - deployed_model_id (str): - ID of the Endpoint's DeployedModel that - served this prediction. - model (str): - Output only. The resource name of the Model - which is deployed as the DeployedModel that this - prediction hits. - model_display_name (str): - Output only. The [display - name][google.cloud.aiplatform.v1.Model.display_name] of the - Model which is deployed as the DeployedModel that this - prediction hits. - """ - - predictions = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=struct_pb2.Value, - ) - deployed_model_id = proto.Field( - proto.STRING, - number=2, - ) - model = proto.Field( - proto.STRING, - number=3, - ) - model_display_name = proto.Field( - proto.STRING, - number=4, - ) - - -class RawPredictRequest(proto.Message): - r"""Request message for - [PredictionService.RawPredict][google.cloud.aiplatform.v1.PredictionService.RawPredict]. - - Attributes: - endpoint (str): - Required. The name of the Endpoint requested to serve the - prediction. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - http_body (google.api.httpbody_pb2.HttpBody): - The prediction input. 
Supports HTTP headers and arbitrary - data payload. - - A [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] - may have an upper limit on the number of instances it - supports per request. When this limit it is exceeded for an - AutoML model, the - [RawPredict][google.cloud.aiplatform.v1.PredictionService.RawPredict] - method returns an error. When this limit is exceeded for a - custom-trained model, the behavior varies depending on the - model. - - You can specify the schema for each instance in the - [predict_schemata.instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] - field when you create a - [Model][google.cloud.aiplatform.v1.Model]. This schema - applies when you deploy the ``Model`` as a ``DeployedModel`` - to an [Endpoint][google.cloud.aiplatform.v1.Endpoint] and - use the ``RawPredict`` method. - """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - http_body = proto.Field( - proto.MESSAGE, - number=2, - message=httpbody_pb2.HttpBody, - ) - - -class ExplainRequest(proto.Message): - r"""Request message for - [PredictionService.Explain][google.cloud.aiplatform.v1.PredictionService.Explain]. - - Attributes: - endpoint (str): - Required. The name of the Endpoint requested to serve the - explanation. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - instances (Sequence[google.protobuf.struct_pb2.Value]): - Required. The instances that are the input to the - explanation call. A DeployedModel may have an upper limit on - the number of instances it supports per request, and when it - is exceeded the explanation call errors in case of AutoML - Models, or, in case of customer created Models, the - behaviour is as documented by that Model. 
The schema of any - single instance may be specified via Endpoint's - DeployedModels' - [Model's][google.cloud.aiplatform.v1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri]. - parameters (google.protobuf.struct_pb2.Value): - The parameters that govern the prediction. The schema of the - parameters may be specified via Endpoint's DeployedModels' - [Model's ][google.cloud.aiplatform.v1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri]. - explanation_spec_override (google.cloud.aiplatform_v1.types.ExplanationSpecOverride): - If specified, overrides the - [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] - of the DeployedModel. Can be used for explaining prediction - results with different configurations, such as: - - - Explaining top-5 predictions results as opposed to top-1; - - Increasing path count or step count of the attribution - methods to reduce approximate errors; - - Using different baselines for explaining the prediction - results. - deployed_model_id (str): - If specified, this ExplainRequest will be served by the - chosen DeployedModel, overriding - [Endpoint.traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split]. 
- """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - instances = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=struct_pb2.Value, - ) - parameters = proto.Field( - proto.MESSAGE, - number=4, - message=struct_pb2.Value, - ) - explanation_spec_override = proto.Field( - proto.MESSAGE, - number=5, - message=explanation.ExplanationSpecOverride, - ) - deployed_model_id = proto.Field( - proto.STRING, - number=3, - ) - - -class ExplainResponse(proto.Message): - r"""Response message for - [PredictionService.Explain][google.cloud.aiplatform.v1.PredictionService.Explain]. - - Attributes: - explanations (Sequence[google.cloud.aiplatform_v1.types.Explanation]): - The explanations of the Model's - [PredictResponse.predictions][google.cloud.aiplatform.v1.PredictResponse.predictions]. - - It has the same number of elements as - [instances][google.cloud.aiplatform.v1.ExplainRequest.instances] - to be explained. - deployed_model_id (str): - ID of the Endpoint's DeployedModel that - served this explanation. - predictions (Sequence[google.protobuf.struct_pb2.Value]): - The predictions that are the output of the predictions call. - Same as - [PredictResponse.predictions][google.cloud.aiplatform.v1.PredictResponse.predictions]. 
- """ - - explanations = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=explanation.Explanation, - ) - deployed_model_id = proto.Field( - proto.STRING, - number=2, - ) - predictions = proto.RepeatedField( - proto.MESSAGE, - number=3, - message=struct_pb2.Value, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool.py deleted file mode 100644 index 533c7a7691..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool.py +++ /dev/null @@ -1,86 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'SpecialistPool', - }, -) - - -class SpecialistPool(proto.Message): - r"""SpecialistPool represents customers' own workforce to work on - their data labeling jobs. It includes a group of specialist - managers and workers. Managers are responsible for managing the - workers in this pool as well as customers' data labeling jobs - associated with this pool. Customers create specialist pool as - well as start data labeling jobs on Cloud, managers and workers - handle the jobs using CrowdCompute console. - - Attributes: - name (str): - Required. The resource name of the - SpecialistPool. 
- display_name (str): - Required. The user-defined name of the - SpecialistPool. The name can be up to 128 - characters long and can be consist of any UTF-8 - characters. - This field should be unique on project-level. - specialist_managers_count (int): - Output only. The number of managers in this - SpecialistPool. - specialist_manager_emails (Sequence[str]): - The email addresses of the managers in the - SpecialistPool. - pending_data_labeling_jobs (Sequence[str]): - Output only. The resource name of the pending - data labeling jobs. - specialist_worker_emails (Sequence[str]): - The email addresses of workers in the - SpecialistPool. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - specialist_managers_count = proto.Field( - proto.INT32, - number=3, - ) - specialist_manager_emails = proto.RepeatedField( - proto.STRING, - number=4, - ) - pending_data_labeling_jobs = proto.RepeatedField( - proto.STRING, - number=5, - ) - specialist_worker_emails = proto.RepeatedField( - proto.STRING, - number=7, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool_service.py deleted file mode 100644 index b853dc8ece..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/specialist_pool_service.py +++ /dev/null @@ -1,237 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import operation -from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'CreateSpecialistPoolRequest', - 'CreateSpecialistPoolOperationMetadata', - 'GetSpecialistPoolRequest', - 'ListSpecialistPoolsRequest', - 'ListSpecialistPoolsResponse', - 'DeleteSpecialistPoolRequest', - 'UpdateSpecialistPoolRequest', - 'UpdateSpecialistPoolOperationMetadata', - }, -) - - -class CreateSpecialistPoolRequest(proto.Message): - r"""Request message for - [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool]. - - Attributes: - parent (str): - Required. The parent Project name for the new - SpecialistPool. The form is - ``projects/{project}/locations/{location}``. - specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool): - Required. The SpecialistPool to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - specialist_pool = proto.Field( - proto.MESSAGE, - number=2, - message=gca_specialist_pool.SpecialistPool, - ) - - -class CreateSpecialistPoolOperationMetadata(proto.Message): - r"""Runtime operation information for - [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The operation generic information. 
- """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class GetSpecialistPoolRequest(proto.Message): - r"""Request message for - [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.GetSpecialistPool]. - - Attributes: - name (str): - Required. The name of the SpecialistPool resource. The form - is - ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListSpecialistPoolsRequest(proto.Message): - r"""Request message for - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]. - - Attributes: - parent (str): - Required. The name of the SpecialistPool's parent resource. - Format: ``projects/{project}/locations/{location}`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained by - [ListSpecialistPoolsResponse.next_page_token][google.cloud.aiplatform.v1.ListSpecialistPoolsResponse.next_page_token] - of the previous - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools] - call. Return first page if empty. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - FieldMask represents a set of - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=4, - message=field_mask_pb2.FieldMask, - ) - - -class ListSpecialistPoolsResponse(proto.Message): - r"""Response message for - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]. 
- - Attributes: - specialist_pools (Sequence[google.cloud.aiplatform_v1.types.SpecialistPool]): - A list of SpecialistPools that matches the - specified filter in the request. - next_page_token (str): - The standard List next-page token. - """ - - @property - def raw_page(self): - return self - - specialist_pools = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_specialist_pool.SpecialistPool, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteSpecialistPoolRequest(proto.Message): - r"""Request message for - [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.DeleteSpecialistPool]. - - Attributes: - name (str): - Required. The resource name of the SpecialistPool to delete. - Format: - ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}`` - force (bool): - If set to true, any specialist managers in - this SpecialistPool will also be deleted. - (Otherwise, the request will only work if the - SpecialistPool has no specialist managers.) - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - force = proto.Field( - proto.BOOL, - number=2, - ) - - -class UpdateSpecialistPoolRequest(proto.Message): - r"""Request message for - [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool]. - - Attributes: - specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool): - Required. The SpecialistPool which replaces - the resource on the server. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the - resource. 
- """ - - specialist_pool = proto.Field( - proto.MESSAGE, - number=1, - message=gca_specialist_pool.SpecialistPool, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class UpdateSpecialistPoolOperationMetadata(proto.Message): - r"""Runtime operation metadata for - [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool]. - - Attributes: - specialist_pool (str): - Output only. The name of the SpecialistPool to which the - specialists are being added. Format: - ``projects/{project_id}/locations/{location_id}/specialistPools/{specialist_pool}`` - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - The operation generic information. - """ - - specialist_pool = proto.Field( - proto.STRING, - number=1, - ) - generic_metadata = proto.Field( - proto.MESSAGE, - number=2, - message=operation.GenericOperationMetadata, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/study.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/study.py deleted file mode 100644 index 940d03ac02..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/study.py +++ /dev/null @@ -1,811 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'Study', - 'Trial', - 'StudySpec', - 'Measurement', - }, -) - - -class Study(proto.Message): - r"""LINT.IfChange - A message representing a Study. - - Attributes: - name (str): - Output only. The name of a study. The study's globally - unique identifier. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - display_name (str): - Required. Describes the Study, default value - is empty string. - study_spec (google.cloud.aiplatform_v1.types.StudySpec): - Required. Configuration of the Study. - state (google.cloud.aiplatform_v1.types.Study.State): - Output only. The detailed state of a Study. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time at which the study was - created. - inactive_reason (str): - Output only. A human readable reason why the - Study is inactive. This should be empty if a - study is ACTIVE or COMPLETED. - """ - class State(proto.Enum): - r"""Describes the Study state.""" - STATE_UNSPECIFIED = 0 - ACTIVE = 1 - INACTIVE = 2 - COMPLETED = 3 - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - study_spec = proto.Field( - proto.MESSAGE, - number=3, - message='StudySpec', - ) - state = proto.Field( - proto.ENUM, - number=4, - enum=State, - ) - create_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - inactive_reason = proto.Field( - proto.STRING, - number=6, - ) - - -class Trial(proto.Message): - r"""A message representing a Trial. A Trial contains a unique set - of Parameters that has been or will be evaluated, along with the - objective metrics got by running the Trial. - - Attributes: - name (str): - Output only. 
Resource name of the Trial - assigned by the service. - id (str): - Output only. The identifier of the Trial - assigned by the service. - state (google.cloud.aiplatform_v1.types.Trial.State): - Output only. The detailed state of the Trial. - parameters (Sequence[google.cloud.aiplatform_v1.types.Trial.Parameter]): - Output only. The parameters of the Trial. - final_measurement (google.cloud.aiplatform_v1.types.Measurement): - Output only. The final measurement containing - the objective value. - measurements (Sequence[google.cloud.aiplatform_v1.types.Measurement]): - Output only. A list of measurements that are strictly - lexicographically ordered by their induced tuples (steps, - elapsed_duration). These are used for early stopping - computations. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the Trial was started. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the Trial's status changed to - ``SUCCEEDED`` or ``INFEASIBLE``. - client_id (str): - Output only. The identifier of the client that originally - requested this Trial. Each client is identified by a unique - client_id. When a client asks for a suggestion, Vertex AI - Vizier will assign it a Trial. The client should evaluate - the Trial, complete it, and report back to Vertex AI Vizier. - If suggestion is asked again by same client_id before the - Trial is completed, the same Trial will be returned. - Multiple clients with different client_ids can ask for - suggestions simultaneously, each of them will get their own - Trial. - infeasible_reason (str): - Output only. A human readable string describing why the - Trial is infeasible. This is set only if Trial state is - ``INFEASIBLE``. - custom_job (str): - Output only. The CustomJob name linked to the - Trial. It's set for a HyperparameterTuningJob's - Trial. - web_access_uris (Sequence[google.cloud.aiplatform_v1.types.Trial.WebAccessUrisEntry]): - Output only. 
URIs for accessing `interactive - shells `__ - (one URI for each training node). Only available if this - trial is part of a - [HyperparameterTuningJob][google.cloud.aiplatform.v1.HyperparameterTuningJob] - and the job's - [trial_job_spec.enable_web_access][google.cloud.aiplatform.v1.CustomJobSpec.enable_web_access] - field is ``true``. - - The keys are names of each node used for the trial; for - example, ``workerpool0-0`` for the primary node, - ``workerpool1-0`` for the first node in the second worker - pool, and ``workerpool1-1`` for the second node in the - second worker pool. - - The values are the URIs for each node's interactive shell. - """ - class State(proto.Enum): - r"""Describes a Trial state.""" - STATE_UNSPECIFIED = 0 - REQUESTED = 1 - ACTIVE = 2 - STOPPING = 3 - SUCCEEDED = 4 - INFEASIBLE = 5 - - class Parameter(proto.Message): - r"""A message representing a parameter to be tuned. - - Attributes: - parameter_id (str): - Output only. The ID of the parameter. The parameter should - be defined in [StudySpec's - Parameters][google.cloud.aiplatform.v1.StudySpec.parameters]. - value (google.protobuf.struct_pb2.Value): - Output only. The value of the parameter. ``number_value`` - will be set if a parameter defined in StudySpec is in type - 'INTEGER', 'DOUBLE' or 'DISCRETE'. ``string_value`` will be - set if a parameter defined in StudySpec is in type - 'CATEGORICAL'. 
- """ - - parameter_id = proto.Field( - proto.STRING, - number=1, - ) - value = proto.Field( - proto.MESSAGE, - number=2, - message=struct_pb2.Value, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - id = proto.Field( - proto.STRING, - number=2, - ) - state = proto.Field( - proto.ENUM, - number=3, - enum=State, - ) - parameters = proto.RepeatedField( - proto.MESSAGE, - number=4, - message=Parameter, - ) - final_measurement = proto.Field( - proto.MESSAGE, - number=5, - message='Measurement', - ) - measurements = proto.RepeatedField( - proto.MESSAGE, - number=6, - message='Measurement', - ) - start_time = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=8, - message=timestamp_pb2.Timestamp, - ) - client_id = proto.Field( - proto.STRING, - number=9, - ) - infeasible_reason = proto.Field( - proto.STRING, - number=10, - ) - custom_job = proto.Field( - proto.STRING, - number=11, - ) - web_access_uris = proto.MapField( - proto.STRING, - proto.STRING, - number=12, - ) - - -class StudySpec(proto.Message): - r"""Represents specification of a Study. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - decay_curve_stopping_spec (google.cloud.aiplatform_v1.types.StudySpec.DecayCurveAutomatedStoppingSpec): - The automated early stopping spec using decay - curve rule. - - This field is a member of `oneof`_ ``automated_stopping_spec``. - median_automated_stopping_spec (google.cloud.aiplatform_v1.types.StudySpec.MedianAutomatedStoppingSpec): - The automated early stopping spec using - median rule. - - This field is a member of `oneof`_ ``automated_stopping_spec``. 
- metrics (Sequence[google.cloud.aiplatform_v1.types.StudySpec.MetricSpec]): - Required. Metric specs for the Study. - parameters (Sequence[google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec]): - Required. The set of parameters to tune. - algorithm (google.cloud.aiplatform_v1.types.StudySpec.Algorithm): - The search algorithm specified for the Study. - observation_noise (google.cloud.aiplatform_v1.types.StudySpec.ObservationNoise): - The observation noise level of the study. - Currently only supported by the Vertex AI Vizier - service. Not supported by HyperparamterTuningJob - or TrainingPipeline. - measurement_selection_type (google.cloud.aiplatform_v1.types.StudySpec.MeasurementSelectionType): - Describe which measurement selection type - will be used - """ - class Algorithm(proto.Enum): - r"""The available search algorithms for the Study.""" - ALGORITHM_UNSPECIFIED = 0 - GRID_SEARCH = 2 - RANDOM_SEARCH = 3 - - class ObservationNoise(proto.Enum): - r"""Describes the noise level of the repeated observations. - "Noisy" means that the repeated observations with the same Trial - parameters may lead to different metric evaluations. - """ - OBSERVATION_NOISE_UNSPECIFIED = 0 - LOW = 1 - HIGH = 2 - - class MeasurementSelectionType(proto.Enum): - r"""This indicates which measurement to use if/when the service - automatically selects the final measurement from previously reported - intermediate measurements. Choose this based on two considerations: - A) Do you expect your measurements to monotonically improve? If so, - choose LAST_MEASUREMENT. On the other hand, if you're in a situation - where your system can "over-train" and you expect the performance to - get better for a while but then start declining, choose - BEST_MEASUREMENT. B) Are your measurements significantly noisy - and/or irreproducible? If so, BEST_MEASUREMENT will tend to be - over-optimistic, and it may be better to choose LAST_MEASUREMENT. 
If - both or neither of (A) and (B) apply, it doesn't matter which - selection type is chosen. - """ - MEASUREMENT_SELECTION_TYPE_UNSPECIFIED = 0 - LAST_MEASUREMENT = 1 - BEST_MEASUREMENT = 2 - - class MetricSpec(proto.Message): - r"""Represents a metric to optimize. - - Attributes: - metric_id (str): - Required. The ID of the metric. Must not - contain whitespaces and must be unique amongst - all MetricSpecs. - goal (google.cloud.aiplatform_v1.types.StudySpec.MetricSpec.GoalType): - Required. The optimization goal of the - metric. - """ - class GoalType(proto.Enum): - r"""The available types of optimization goals.""" - GOAL_TYPE_UNSPECIFIED = 0 - MAXIMIZE = 1 - MINIMIZE = 2 - - metric_id = proto.Field( - proto.STRING, - number=1, - ) - goal = proto.Field( - proto.ENUM, - number=2, - enum='StudySpec.MetricSpec.GoalType', - ) - - class ParameterSpec(proto.Message): - r"""Represents a single parameter to optimize. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - double_value_spec (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.DoubleValueSpec): - The value spec for a 'DOUBLE' parameter. - - This field is a member of `oneof`_ ``parameter_value_spec``. - integer_value_spec (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.IntegerValueSpec): - The value spec for an 'INTEGER' parameter. - - This field is a member of `oneof`_ ``parameter_value_spec``. - categorical_value_spec (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.CategoricalValueSpec): - The value spec for a 'CATEGORICAL' parameter. - - This field is a member of `oneof`_ ``parameter_value_spec``. 
- discrete_value_spec (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.DiscreteValueSpec): - The value spec for a 'DISCRETE' parameter. - - This field is a member of `oneof`_ ``parameter_value_spec``. - parameter_id (str): - Required. The ID of the parameter. Must not - contain whitespaces and must be unique amongst - all ParameterSpecs. - scale_type (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.ScaleType): - How the parameter should be scaled. Leave unset for - ``CATEGORICAL`` parameters. - conditional_parameter_specs (Sequence[google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.ConditionalParameterSpec]): - A conditional parameter node is active if the parameter's - value matches the conditional node's parent_value_condition. - - If two items in conditional_parameter_specs have the same - name, they must have disjoint parent_value_condition. - """ - class ScaleType(proto.Enum): - r"""The type of scaling that should be applied to this parameter.""" - SCALE_TYPE_UNSPECIFIED = 0 - UNIT_LINEAR_SCALE = 1 - UNIT_LOG_SCALE = 2 - UNIT_REVERSE_LOG_SCALE = 3 - - class DoubleValueSpec(proto.Message): - r"""Value specification for a parameter in ``DOUBLE`` type. - - Attributes: - min_value (float): - Required. Inclusive minimum value of the - parameter. - max_value (float): - Required. Inclusive maximum value of the - parameter. - default_value (float): - A default value for a ``DOUBLE`` parameter that is assumed - to be a relatively good starting point. Unset value signals - that there is no offered starting point. - - Currently only supported by the Vertex AI Vizier service. - Not supported by HyperparamterTuningJob or TrainingPipeline. - - This field is a member of `oneof`_ ``_default_value``. 
- """ - - min_value = proto.Field( - proto.DOUBLE, - number=1, - ) - max_value = proto.Field( - proto.DOUBLE, - number=2, - ) - default_value = proto.Field( - proto.DOUBLE, - number=4, - optional=True, - ) - - class IntegerValueSpec(proto.Message): - r"""Value specification for a parameter in ``INTEGER`` type. - - Attributes: - min_value (int): - Required. Inclusive minimum value of the - parameter. - max_value (int): - Required. Inclusive maximum value of the - parameter. - default_value (int): - A default value for an ``INTEGER`` parameter that is assumed - to be a relatively good starting point. Unset value signals - that there is no offered starting point. - - Currently only supported by the Vertex AI Vizier service. - Not supported by HyperparamterTuningJob or TrainingPipeline. - - This field is a member of `oneof`_ ``_default_value``. - """ - - min_value = proto.Field( - proto.INT64, - number=1, - ) - max_value = proto.Field( - proto.INT64, - number=2, - ) - default_value = proto.Field( - proto.INT64, - number=4, - optional=True, - ) - - class CategoricalValueSpec(proto.Message): - r"""Value specification for a parameter in ``CATEGORICAL`` type. - - Attributes: - values (Sequence[str]): - Required. The list of possible categories. - default_value (str): - A default value for a ``CATEGORICAL`` parameter that is - assumed to be a relatively good starting point. Unset value - signals that there is no offered starting point. - - Currently only supported by the Vizier service. Not - supported by HyperparamterTuningJob or TrainingPipeline. - - This field is a member of `oneof`_ ``_default_value``. - """ - - values = proto.RepeatedField( - proto.STRING, - number=1, - ) - default_value = proto.Field( - proto.STRING, - number=3, - optional=True, - ) - - class DiscreteValueSpec(proto.Message): - r"""Value specification for a parameter in ``DISCRETE`` type. - - Attributes: - values (Sequence[float]): - Required. A list of possible values. 
- The list should be in increasing order and at - least 1e-10 apart. For instance, this parameter - might have possible settings of 1.5, 2.5, and - 4.0. This list should not contain more than - 1,000 values. - default_value (float): - A default value for a ``DISCRETE`` parameter that is assumed - to be a relatively good starting point. Unset value signals - that there is no offered starting point. It automatically - rounds to the nearest feasible discrete point. - - Currently only supported by the Vizier service. Not - supported by HyperparamterTuningJob or TrainingPipeline. - - This field is a member of `oneof`_ ``_default_value``. - """ - - values = proto.RepeatedField( - proto.DOUBLE, - number=1, - ) - default_value = proto.Field( - proto.DOUBLE, - number=3, - optional=True, - ) - - class ConditionalParameterSpec(proto.Message): - r"""Represents a parameter spec with condition from its parent - parameter. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - parent_discrete_values (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.ConditionalParameterSpec.DiscreteValueCondition): - The spec for matching values from a parent parameter of - ``DISCRETE`` type. - - This field is a member of `oneof`_ ``parent_value_condition``. - parent_int_values (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.ConditionalParameterSpec.IntValueCondition): - The spec for matching values from a parent parameter of - ``INTEGER`` type. - - This field is a member of `oneof`_ ``parent_value_condition``. 
- parent_categorical_values (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec.ConditionalParameterSpec.CategoricalValueCondition): - The spec for matching values from a parent parameter of - ``CATEGORICAL`` type. - - This field is a member of `oneof`_ ``parent_value_condition``. - parameter_spec (google.cloud.aiplatform_v1.types.StudySpec.ParameterSpec): - Required. The spec for a conditional - parameter. - """ - - class DiscreteValueCondition(proto.Message): - r"""Represents the spec to match discrete values from parent - parameter. - - Attributes: - values (Sequence[float]): - Required. Matches values of the parent parameter of - 'DISCRETE' type. All values must exist in - ``discrete_value_spec`` of parent parameter. - - The Epsilon of the value matching is 1e-10. - """ - - values = proto.RepeatedField( - proto.DOUBLE, - number=1, - ) - - class IntValueCondition(proto.Message): - r"""Represents the spec to match integer values from parent - parameter. - - Attributes: - values (Sequence[int]): - Required. Matches values of the parent parameter of - 'INTEGER' type. All values must lie in - ``integer_value_spec`` of parent parameter. - """ - - values = proto.RepeatedField( - proto.INT64, - number=1, - ) - - class CategoricalValueCondition(proto.Message): - r"""Represents the spec to match categorical values from parent - parameter. - - Attributes: - values (Sequence[str]): - Required. Matches values of the parent parameter of - 'CATEGORICAL' type. All values must exist in - ``categorical_value_spec`` of parent parameter. 
- """ - - values = proto.RepeatedField( - proto.STRING, - number=1, - ) - - parent_discrete_values = proto.Field( - proto.MESSAGE, - number=2, - oneof='parent_value_condition', - message='StudySpec.ParameterSpec.ConditionalParameterSpec.DiscreteValueCondition', - ) - parent_int_values = proto.Field( - proto.MESSAGE, - number=3, - oneof='parent_value_condition', - message='StudySpec.ParameterSpec.ConditionalParameterSpec.IntValueCondition', - ) - parent_categorical_values = proto.Field( - proto.MESSAGE, - number=4, - oneof='parent_value_condition', - message='StudySpec.ParameterSpec.ConditionalParameterSpec.CategoricalValueCondition', - ) - parameter_spec = proto.Field( - proto.MESSAGE, - number=1, - message='StudySpec.ParameterSpec', - ) - - double_value_spec = proto.Field( - proto.MESSAGE, - number=2, - oneof='parameter_value_spec', - message='StudySpec.ParameterSpec.DoubleValueSpec', - ) - integer_value_spec = proto.Field( - proto.MESSAGE, - number=3, - oneof='parameter_value_spec', - message='StudySpec.ParameterSpec.IntegerValueSpec', - ) - categorical_value_spec = proto.Field( - proto.MESSAGE, - number=4, - oneof='parameter_value_spec', - message='StudySpec.ParameterSpec.CategoricalValueSpec', - ) - discrete_value_spec = proto.Field( - proto.MESSAGE, - number=5, - oneof='parameter_value_spec', - message='StudySpec.ParameterSpec.DiscreteValueSpec', - ) - parameter_id = proto.Field( - proto.STRING, - number=1, - ) - scale_type = proto.Field( - proto.ENUM, - number=6, - enum='StudySpec.ParameterSpec.ScaleType', - ) - conditional_parameter_specs = proto.RepeatedField( - proto.MESSAGE, - number=10, - message='StudySpec.ParameterSpec.ConditionalParameterSpec', - ) - - class DecayCurveAutomatedStoppingSpec(proto.Message): - r"""The decay curve automated stopping rule builds a Gaussian - Process Regressor to predict the final objective value of a - Trial based on the already completed Trials and the intermediate - measurements of the current Trial. 
Early stopping is requested - for the current Trial if there is very low probability to exceed - the optimal value found so far. - - Attributes: - use_elapsed_duration (bool): - True if - [Measurement.elapsed_duration][google.cloud.aiplatform.v1.Measurement.elapsed_duration] - is used as the x-axis of each Trials Decay Curve. Otherwise, - [Measurement.step_count][google.cloud.aiplatform.v1.Measurement.step_count] - will be used as the x-axis. - """ - - use_elapsed_duration = proto.Field( - proto.BOOL, - number=1, - ) - - class MedianAutomatedStoppingSpec(proto.Message): - r"""The median automated stopping rule stops a pending Trial if the - Trial's best objective_value is strictly below the median - 'performance' of all completed Trials reported up to the Trial's - last measurement. Currently, 'performance' refers to the running - average of the objective values reported by the Trial in each - measurement. - - Attributes: - use_elapsed_duration (bool): - True if median automated stopping rule applies on - [Measurement.elapsed_duration][google.cloud.aiplatform.v1.Measurement.elapsed_duration]. - It means that elapsed_duration field of latest measurement - of current Trial is used to compute median objective value - for each completed Trials. 
- """ - - use_elapsed_duration = proto.Field( - proto.BOOL, - number=1, - ) - - decay_curve_stopping_spec = proto.Field( - proto.MESSAGE, - number=4, - oneof='automated_stopping_spec', - message=DecayCurveAutomatedStoppingSpec, - ) - median_automated_stopping_spec = proto.Field( - proto.MESSAGE, - number=5, - oneof='automated_stopping_spec', - message=MedianAutomatedStoppingSpec, - ) - metrics = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=MetricSpec, - ) - parameters = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=ParameterSpec, - ) - algorithm = proto.Field( - proto.ENUM, - number=3, - enum=Algorithm, - ) - observation_noise = proto.Field( - proto.ENUM, - number=6, - enum=ObservationNoise, - ) - measurement_selection_type = proto.Field( - proto.ENUM, - number=7, - enum=MeasurementSelectionType, - ) - - -class Measurement(proto.Message): - r"""A message representing a Measurement of a Trial. A - Measurement contains the Metrics got by executing a Trial using - suggested hyperparameter values. - - Attributes: - elapsed_duration (google.protobuf.duration_pb2.Duration): - Output only. Time that the Trial has been - running at the point of this Measurement. - step_count (int): - Output only. The number of steps the machine - learning model has been trained for. Must be - non-negative. - metrics (Sequence[google.cloud.aiplatform_v1.types.Measurement.Metric]): - Output only. A list of metrics got by - evaluating the objective functions using - suggested Parameter values. - """ - - class Metric(proto.Message): - r"""A message representing a metric in the measurement. - - Attributes: - metric_id (str): - Output only. The ID of the Metric. The Metric should be - defined in [StudySpec's - Metrics][google.cloud.aiplatform.v1.StudySpec.metrics]. - value (float): - Output only. The value for this metric. 
- """ - - metric_id = proto.Field( - proto.STRING, - number=1, - ) - value = proto.Field( - proto.DOUBLE, - number=2, - ) - - elapsed_duration = proto.Field( - proto.MESSAGE, - number=1, - message=duration_pb2.Duration, - ) - step_count = proto.Field( - proto.INT64, - number=2, - ) - metrics = proto.RepeatedField( - proto.MESSAGE, - number=3, - message=Metric, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard.py deleted file mode 100644 index ef7567b50e..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard.py +++ /dev/null @@ -1,131 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'Tensorboard', - }, -) - - -class Tensorboard(proto.Message): - r"""Tensorboard is a physical database that stores users' - training metrics. A default Tensorboard is provided in each - region of a GCP project. If needed users can also create extra - Tensorboards in their projects. - - Attributes: - name (str): - Output only. Name of the Tensorboard. 
Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - display_name (str): - Required. User provided name of this - Tensorboard. - description (str): - Description of this Tensorboard. - encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): - Customer-managed encryption key spec for a - Tensorboard. If set, this Tensorboard and all - sub-resources of this Tensorboard will be - secured by this key. - blob_storage_path_prefix (str): - Output only. Consumer project Cloud Storage - path prefix used to store blob data, which can - either be a bucket or directory. Does not end - with a '/'. - run_count (int): - Output only. The number of Runs stored in - this Tensorboard. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Tensorboard - was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Tensorboard - was last updated. - labels (Sequence[google.cloud.aiplatform_v1.types.Tensorboard.LabelsEntry]): - The labels with user-defined metadata to - organize your Tensorboards. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. No more than 64 user labels can be - associated with one Tensorboard (System labels - are excluded). - - See https://goo.gl/xmQnxf for more information - and examples of labels. System reserved label - keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. - etag (str): - Used to perform a consistent read-modify- - rite updates. If not set, a blind "overwrite" - update happens. 
- """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=3, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=11, - message=gca_encryption_spec.EncryptionSpec, - ) - blob_storage_path_prefix = proto.Field( - proto.STRING, - number=10, - ) - run_count = proto.Field( - proto.INT32, - number=5, - ) - create_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=8, - ) - etag = proto.Field( - proto.STRING, - number=9, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_data.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_data.py deleted file mode 100644 index e7e17d18a7..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_data.py +++ /dev/null @@ -1,205 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import tensorboard_time_series -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'TimeSeriesData', - 'TimeSeriesDataPoint', - 'Scalar', - 'TensorboardTensor', - 'TensorboardBlobSequence', - 'TensorboardBlob', - }, -) - - -class TimeSeriesData(proto.Message): - r"""All the data stored in a TensorboardTimeSeries. - - Attributes: - tensorboard_time_series_id (str): - Required. The ID of the - TensorboardTimeSeries, which will become the - final component of the TensorboardTimeSeries' - resource name - value_type (google.cloud.aiplatform_v1.types.TensorboardTimeSeries.ValueType): - Required. Immutable. The value type of this - time series. All the values in this time series - data must match this value type. - values (Sequence[google.cloud.aiplatform_v1.types.TimeSeriesDataPoint]): - Required. Data points in this time series. - """ - - tensorboard_time_series_id = proto.Field( - proto.STRING, - number=1, - ) - value_type = proto.Field( - proto.ENUM, - number=2, - enum=tensorboard_time_series.TensorboardTimeSeries.ValueType, - ) - values = proto.RepeatedField( - proto.MESSAGE, - number=3, - message='TimeSeriesDataPoint', - ) - - -class TimeSeriesDataPoint(proto.Message): - r"""A TensorboardTimeSeries data point. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - scalar (google.cloud.aiplatform_v1.types.Scalar): - A scalar value. - - This field is a member of `oneof`_ ``value``. - tensor (google.cloud.aiplatform_v1.types.TensorboardTensor): - A tensor value. - - This field is a member of `oneof`_ ``value``. 
- blobs (google.cloud.aiplatform_v1.types.TensorboardBlobSequence): - A blob sequence value. - - This field is a member of `oneof`_ ``value``. - wall_time (google.protobuf.timestamp_pb2.Timestamp): - Wall clock timestamp when this data point is - generated by the end user. - step (int): - Step index of this data point within the run. - """ - - scalar = proto.Field( - proto.MESSAGE, - number=3, - oneof='value', - message='Scalar', - ) - tensor = proto.Field( - proto.MESSAGE, - number=4, - oneof='value', - message='TensorboardTensor', - ) - blobs = proto.Field( - proto.MESSAGE, - number=5, - oneof='value', - message='TensorboardBlobSequence', - ) - wall_time = proto.Field( - proto.MESSAGE, - number=1, - message=timestamp_pb2.Timestamp, - ) - step = proto.Field( - proto.INT64, - number=2, - ) - - -class Scalar(proto.Message): - r"""One point viewable on a scalar metric plot. - - Attributes: - value (float): - Value of the point at this step / timestamp. - """ - - value = proto.Field( - proto.DOUBLE, - number=1, - ) - - -class TensorboardTensor(proto.Message): - r"""One point viewable on a tensor metric plot. - - Attributes: - value (bytes): - Required. Serialized form of - https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/tensor.proto - version_number (int): - Optional. Version number of TensorProto used to serialize - [value][google.cloud.aiplatform.v1.TensorboardTensor.value]. - """ - - value = proto.Field( - proto.BYTES, - number=1, - ) - version_number = proto.Field( - proto.INT32, - number=2, - ) - - -class TensorboardBlobSequence(proto.Message): - r"""One point viewable on a blob metric plot, but mostly just a wrapper - message to work around repeated fields can't be used directly within - ``oneof`` fields. - - Attributes: - values (Sequence[google.cloud.aiplatform_v1.types.TensorboardBlob]): - List of blobs contained within the sequence. 
- """ - - values = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='TensorboardBlob', - ) - - -class TensorboardBlob(proto.Message): - r"""One blob (e.g, image, graph) viewable on a blob metric plot. - - Attributes: - id (str): - Output only. A URI safe key uniquely - identifying a blob. Can be used to locate the - blob stored in the Cloud Storage bucket of the - consumer project. - data (bytes): - Optional. The bytes of the blob is not - present unless it's returned by the - ReadTensorboardBlobData endpoint. - """ - - id = proto.Field( - proto.STRING, - number=1, - ) - data = proto.Field( - proto.BYTES, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_experiment.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_experiment.py deleted file mode 100644 index d8643815a6..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_experiment.py +++ /dev/null @@ -1,115 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'TensorboardExperiment', - }, -) - - -class TensorboardExperiment(proto.Message): - r"""A TensorboardExperiment is a group of TensorboardRuns, that - are typically the results of a training job run, in a - Tensorboard. - - Attributes: - name (str): - Output only. Name of the TensorboardExperiment. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - display_name (str): - User provided name of this - TensorboardExperiment. - description (str): - Description of this TensorboardExperiment. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - TensorboardExperiment was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - TensorboardExperiment was last updated. - labels (Sequence[google.cloud.aiplatform_v1.types.TensorboardExperiment.LabelsEntry]): - The labels with user-defined metadata to organize your - Datasets. - - Label keys and values can be no longer than 64 characters - (Unicode codepoints), can only contain lowercase letters, - numeric characters, underscores and dashes. International - characters are allowed. No more than 64 user labels can be - associated with one Dataset (System labels are excluded). - - See https://goo.gl/xmQnxf for more information and examples - of labels. System reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. Following - system labels exist for each Dataset: - - - "aiplatform.googleapis.com/dataset_metadata_schema": - - - output only, its value is the - [metadata_schema's][metadata_schema_uri] title. - etag (str): - Used to perform consistent read-modify-write - updates. If not set, a blind "overwrite" update - happens. - source (str): - Immutable. 
Source of the - TensorboardExperiment. Example: a custom - training job. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=3, - ) - create_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=6, - ) - etag = proto.Field( - proto.STRING, - number=7, - ) - source = proto.Field( - proto.STRING, - number=8, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_run.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_run.py deleted file mode 100644 index fcb782e681..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_run.py +++ /dev/null @@ -1,112 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'TensorboardRun', - }, -) - - -class TensorboardRun(proto.Message): - r"""TensorboardRun maps to a specific execution of a training job - with a given set of hyperparameter values, model definition, - dataset, etc - - Attributes: - name (str): - Output only. Name of the TensorboardRun. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - display_name (str): - Required. User provided name of this - TensorboardRun. This value must be unique among - all TensorboardRuns belonging to the same parent - TensorboardExperiment. - description (str): - Description of this TensorboardRun. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - TensorboardRun was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - TensorboardRun was last updated. - labels (Sequence[google.cloud.aiplatform_v1.types.TensorboardRun.LabelsEntry]): - The labels with user-defined metadata to organize your - TensorboardRuns. - - This field will be used to filter and visualize Runs in the - Tensorboard UI. For example, a Vertex AI training job can - set a label aiplatform.googleapis.com/training_job_id=xxxxx - to all the runs created within that job. An end user can set - a label experiment_id=xxxxx for all the runs produced in a - Jupyter notebook. These runs can be grouped by a label value - and visualized together in the Tensorboard UI. - - Label keys and values can be no longer than 64 characters - (Unicode codepoints), can only contain lowercase letters, - numeric characters, underscores and dashes. International - characters are allowed. No more than 64 user labels can be - associated with one TensorboardRun (System labels are - excluded). 
- - See https://goo.gl/xmQnxf for more information and examples - of labels. System reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. - etag (str): - Used to perform a consistent read-modify- - rite updates. If not set, a blind "overwrite" - update happens. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=3, - ) - create_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=8, - ) - etag = proto.Field( - proto.STRING, - number=9, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_service.py deleted file mode 100644 index efb7b86328..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_service.py +++ /dev/null @@ -1,1224 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import operation -from google.cloud.aiplatform_v1.types import tensorboard as gca_tensorboard -from google.cloud.aiplatform_v1.types import tensorboard_data -from google.cloud.aiplatform_v1.types import tensorboard_experiment as gca_tensorboard_experiment -from google.cloud.aiplatform_v1.types import tensorboard_run as gca_tensorboard_run -from google.cloud.aiplatform_v1.types import tensorboard_time_series as gca_tensorboard_time_series -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'CreateTensorboardRequest', - 'GetTensorboardRequest', - 'ListTensorboardsRequest', - 'ListTensorboardsResponse', - 'UpdateTensorboardRequest', - 'DeleteTensorboardRequest', - 'CreateTensorboardExperimentRequest', - 'GetTensorboardExperimentRequest', - 'ListTensorboardExperimentsRequest', - 'ListTensorboardExperimentsResponse', - 'UpdateTensorboardExperimentRequest', - 'DeleteTensorboardExperimentRequest', - 'BatchCreateTensorboardRunsRequest', - 'BatchCreateTensorboardRunsResponse', - 'CreateTensorboardRunRequest', - 'GetTensorboardRunRequest', - 'ReadTensorboardBlobDataRequest', - 'ReadTensorboardBlobDataResponse', - 'ListTensorboardRunsRequest', - 'ListTensorboardRunsResponse', - 'UpdateTensorboardRunRequest', - 'DeleteTensorboardRunRequest', - 'BatchCreateTensorboardTimeSeriesRequest', - 'BatchCreateTensorboardTimeSeriesResponse', - 'CreateTensorboardTimeSeriesRequest', - 'GetTensorboardTimeSeriesRequest', - 'ListTensorboardTimeSeriesRequest', - 'ListTensorboardTimeSeriesResponse', - 'UpdateTensorboardTimeSeriesRequest', - 'DeleteTensorboardTimeSeriesRequest', - 'BatchReadTensorboardTimeSeriesDataRequest', - 'BatchReadTensorboardTimeSeriesDataResponse', - 'ReadTensorboardTimeSeriesDataRequest', - 'ReadTensorboardTimeSeriesDataResponse', - 'WriteTensorboardExperimentDataRequest', - 
'WriteTensorboardExperimentDataResponse', - 'WriteTensorboardRunDataRequest', - 'WriteTensorboardRunDataResponse', - 'ExportTensorboardTimeSeriesDataRequest', - 'ExportTensorboardTimeSeriesDataResponse', - 'CreateTensorboardOperationMetadata', - 'UpdateTensorboardOperationMetadata', - }, -) - - -class CreateTensorboardRequest(proto.Message): - r"""Request message for - [TensorboardService.CreateTensorboard][google.cloud.aiplatform.v1.TensorboardService.CreateTensorboard]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - Tensorboard in. Format: - ``projects/{project}/locations/{location}`` - tensorboard (google.cloud.aiplatform_v1.types.Tensorboard): - Required. The Tensorboard to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - tensorboard = proto.Field( - proto.MESSAGE, - number=2, - message=gca_tensorboard.Tensorboard, - ) - - -class GetTensorboardRequest(proto.Message): - r"""Request message for - [TensorboardService.GetTensorboard][google.cloud.aiplatform.v1.TensorboardService.GetTensorboard]. - - Attributes: - name (str): - Required. The name of the Tensorboard resource. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListTensorboardsRequest(proto.Message): - r"""Request message for - [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1.TensorboardService.ListTensorboards]. - - Attributes: - parent (str): - Required. The resource name of the Location to list - Tensorboards. Format: - ``projects/{project}/locations/{location}`` - filter (str): - Lists the Tensorboards that match the filter - expression. - page_size (int): - The maximum number of Tensorboards to return. - The service may return fewer than this value. If - unspecified, at most 100 Tensorboards will be - returned. The maximum value is 100; values above - 100 will be coerced to 100. 
- page_token (str): - A page token, received from a previous - [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1.TensorboardService.ListTensorboards] - call. Provide this to retrieve the subsequent page. - - When paginating, all other parameters provided to - [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1.TensorboardService.ListTensorboards] - must match the call that provided the page token. - order_by (str): - Field to use to sort the list. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - order_by = proto.Field( - proto.STRING, - number=5, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=6, - message=field_mask_pb2.FieldMask, - ) - - -class ListTensorboardsResponse(proto.Message): - r"""Response message for - [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1.TensorboardService.ListTensorboards]. - - Attributes: - tensorboards (Sequence[google.cloud.aiplatform_v1.types.Tensorboard]): - The Tensorboards mathching the request. - next_page_token (str): - A token, which can be sent as - [ListTensorboardsRequest.page_token][google.cloud.aiplatform.v1.ListTensorboardsRequest.page_token] - to retrieve the next page. If this field is omitted, there - are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - tensorboards = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_tensorboard.Tensorboard, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateTensorboardRequest(proto.Message): - r"""Request message for - [TensorboardService.UpdateTensorboard][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboard]. 
- - Attributes: - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. Field mask is used to specify the fields to be - overwritten in the Tensorboard resource by the update. The - fields specified in the update_mask are relative to the - resource, not the full request. A field will be overwritten - if it is in the mask. If the user does not provide a mask - then all fields will be overwritten if new values are - specified. - tensorboard (google.cloud.aiplatform_v1.types.Tensorboard): - Required. The Tensorboard's ``name`` field is used to - identify the Tensorboard to be updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - """ - - update_mask = proto.Field( - proto.MESSAGE, - number=1, - message=field_mask_pb2.FieldMask, - ) - tensorboard = proto.Field( - proto.MESSAGE, - number=2, - message=gca_tensorboard.Tensorboard, - ) - - -class DeleteTensorboardRequest(proto.Message): - r"""Request message for - [TensorboardService.DeleteTensorboard][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboard]. - - Attributes: - name (str): - Required. The name of the Tensorboard to be deleted. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateTensorboardExperimentRequest(proto.Message): - r"""Request message for - [TensorboardService.CreateTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardExperiment]. - - Attributes: - parent (str): - Required. The resource name of the Tensorboard to create the - TensorboardExperiment in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - tensorboard_experiment (google.cloud.aiplatform_v1.types.TensorboardExperiment): - The TensorboardExperiment to create. - tensorboard_experiment_id (str): - Required. 
The ID to use for the Tensorboard experiment, - which will become the final component of the Tensorboard - experiment's resource name. - - This value should be 1-128 characters, and valid characters - are /[a-z][0-9]-/. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - tensorboard_experiment = proto.Field( - proto.MESSAGE, - number=2, - message=gca_tensorboard_experiment.TensorboardExperiment, - ) - tensorboard_experiment_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetTensorboardExperimentRequest(proto.Message): - r"""Request message for - [TensorboardService.GetTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.GetTensorboardExperiment]. - - Attributes: - name (str): - Required. The name of the TensorboardExperiment resource. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListTensorboardExperimentsRequest(proto.Message): - r"""Request message for - [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardExperiments]. - - Attributes: - parent (str): - Required. The resource name of the - Tensorboard to list TensorboardExperiments. - Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}' - filter (str): - Lists the TensorboardExperiments that match - the filter expression. - page_size (int): - The maximum number of TensorboardExperiments - to return. The service may return fewer than - this value. If unspecified, at most 50 - TensorboardExperiments will be returned. The - maximum value is 1000; values above 1000 will be - coerced to 1000. - page_token (str): - A page token, received from a previous - [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardExperiments] - call. Provide this to retrieve the subsequent page. 
- - When paginating, all other parameters provided to - [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardExperiments] - must match the call that provided the page token. - order_by (str): - Field to use to sort the list. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - order_by = proto.Field( - proto.STRING, - number=5, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=6, - message=field_mask_pb2.FieldMask, - ) - - -class ListTensorboardExperimentsResponse(proto.Message): - r"""Response message for - [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardExperiments]. - - Attributes: - tensorboard_experiments (Sequence[google.cloud.aiplatform_v1.types.TensorboardExperiment]): - The TensorboardExperiments mathching the - request. - next_page_token (str): - A token, which can be sent as - [ListTensorboardExperimentsRequest.page_token][google.cloud.aiplatform.v1.ListTensorboardExperimentsRequest.page_token] - to retrieve the next page. If this field is omitted, there - are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - tensorboard_experiments = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_tensorboard_experiment.TensorboardExperiment, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateTensorboardExperimentRequest(proto.Message): - r"""Request message for - [TensorboardService.UpdateTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardExperiment]. - - Attributes: - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. 
Field mask is used to specify the fields to be - overwritten in the TensorboardExperiment resource by the - update. The fields specified in the update_mask are relative - to the resource, not the full request. A field will be - overwritten if it is in the mask. If the user does not - provide a mask then all fields will be overwritten if new - values are specified. - tensorboard_experiment (google.cloud.aiplatform_v1.types.TensorboardExperiment): - Required. The TensorboardExperiment's ``name`` field is used - to identify the TensorboardExperiment to be updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - """ - - update_mask = proto.Field( - proto.MESSAGE, - number=1, - message=field_mask_pb2.FieldMask, - ) - tensorboard_experiment = proto.Field( - proto.MESSAGE, - number=2, - message=gca_tensorboard_experiment.TensorboardExperiment, - ) - - -class DeleteTensorboardExperimentRequest(proto.Message): - r"""Request message for - [TensorboardService.DeleteTensorboardExperiment][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardExperiment]. - - Attributes: - name (str): - Required. The name of the TensorboardExperiment to be - deleted. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class BatchCreateTensorboardRunsRequest(proto.Message): - r"""Request message for - [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardRuns]. - - Attributes: - parent (str): - Required. The resource name of the TensorboardExperiment to - create the TensorboardRuns in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - The parent field in the CreateTensorboardRunRequest messages - must match this field. 
- requests (Sequence[google.cloud.aiplatform_v1.types.CreateTensorboardRunRequest]): - Required. The request message specifying the - TensorboardRuns to create. A maximum of 1000 - TensorboardRuns can be created in a batch. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - requests = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='CreateTensorboardRunRequest', - ) - - -class BatchCreateTensorboardRunsResponse(proto.Message): - r"""Response message for - [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardRuns]. - - Attributes: - tensorboard_runs (Sequence[google.cloud.aiplatform_v1.types.TensorboardRun]): - The created TensorboardRuns. - """ - - tensorboard_runs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_tensorboard_run.TensorboardRun, - ) - - -class CreateTensorboardRunRequest(proto.Message): - r"""Request message for - [TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardRun]. - - Attributes: - parent (str): - Required. The resource name of the TensorboardExperiment to - create the TensorboardRun in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - tensorboard_run (google.cloud.aiplatform_v1.types.TensorboardRun): - Required. The TensorboardRun to create. - tensorboard_run_id (str): - Required. The ID to use for the Tensorboard run, which will - become the final component of the Tensorboard run's resource - name. - - This value should be 1-128 characters, and valid characters - are /[a-z][0-9]-/. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - tensorboard_run = proto.Field( - proto.MESSAGE, - number=2, - message=gca_tensorboard_run.TensorboardRun, - ) - tensorboard_run_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetTensorboardRunRequest(proto.Message): - r"""Request message for - [TensorboardService.GetTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.GetTensorboardRun]. - - Attributes: - name (str): - Required. The name of the TensorboardRun resource. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ReadTensorboardBlobDataRequest(proto.Message): - r"""Request message for - [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardBlobData]. - - Attributes: - time_series (str): - Required. The resource name of the TensorboardTimeSeries to - list Blobs. Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}' - blob_ids (Sequence[str]): - IDs of the blobs to read. - """ - - time_series = proto.Field( - proto.STRING, - number=1, - ) - blob_ids = proto.RepeatedField( - proto.STRING, - number=2, - ) - - -class ReadTensorboardBlobDataResponse(proto.Message): - r"""Response message for - [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardBlobData]. - - Attributes: - blobs (Sequence[google.cloud.aiplatform_v1.types.TensorboardBlob]): - Blob messages containing blob bytes. - """ - - blobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=tensorboard_data.TensorboardBlob, - ) - - -class ListTensorboardRunsRequest(proto.Message): - r"""Request message for - [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardRuns]. 
- - Attributes: - parent (str): - Required. The resource name of the - TensorboardExperiment to list TensorboardRuns. - Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}' - filter (str): - Lists the TensorboardRuns that match the - filter expression. - page_size (int): - The maximum number of TensorboardRuns to - return. The service may return fewer than this - value. If unspecified, at most 50 - TensorboardRuns will be returned. The maximum - value is 1000; values above 1000 will be coerced - to 1000. - page_token (str): - A page token, received from a previous - [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardRuns] - call. Provide this to retrieve the subsequent page. - - When paginating, all other parameters provided to - [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardRuns] - must match the call that provided the page token. - order_by (str): - Field to use to sort the list. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - order_by = proto.Field( - proto.STRING, - number=5, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=6, - message=field_mask_pb2.FieldMask, - ) - - -class ListTensorboardRunsResponse(proto.Message): - r"""Response message for - [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardRuns]. - - Attributes: - tensorboard_runs (Sequence[google.cloud.aiplatform_v1.types.TensorboardRun]): - The TensorboardRuns mathching the request. 
- next_page_token (str): - A token, which can be sent as - [ListTensorboardRunsRequest.page_token][google.cloud.aiplatform.v1.ListTensorboardRunsRequest.page_token] - to retrieve the next page. If this field is omitted, there - are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - tensorboard_runs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_tensorboard_run.TensorboardRun, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateTensorboardRunRequest(proto.Message): - r"""Request message for - [TensorboardService.UpdateTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardRun]. - - Attributes: - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. Field mask is used to specify the fields to be - overwritten in the TensorboardRun resource by the update. - The fields specified in the update_mask are relative to the - resource, not the full request. A field will be overwritten - if it is in the mask. If the user does not provide a mask - then all fields will be overwritten if new values are - specified. - tensorboard_run (google.cloud.aiplatform_v1.types.TensorboardRun): - Required. The TensorboardRun's ``name`` field is used to - identify the TensorboardRun to be updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - """ - - update_mask = proto.Field( - proto.MESSAGE, - number=1, - message=field_mask_pb2.FieldMask, - ) - tensorboard_run = proto.Field( - proto.MESSAGE, - number=2, - message=gca_tensorboard_run.TensorboardRun, - ) - - -class DeleteTensorboardRunRequest(proto.Message): - r"""Request message for - [TensorboardService.DeleteTensorboardRun][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardRun]. - - Attributes: - name (str): - Required. The name of the TensorboardRun to be deleted. 
- Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class BatchCreateTensorboardTimeSeriesRequest(proto.Message): - r"""Request message for - [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardTimeSeries]. - - Attributes: - parent (str): - Required. The resource name of the TensorboardExperiment to - create the TensorboardTimeSeries in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - The TensorboardRuns referenced by the parent fields in the - CreateTensorboardTimeSeriesRequest messages must be sub - resources of this TensorboardExperiment. - requests (Sequence[google.cloud.aiplatform_v1.types.CreateTensorboardTimeSeriesRequest]): - Required. The request message specifying the - TensorboardTimeSeries to create. A maximum of - 1000 TensorboardTimeSeries can be created in a - batch. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - requests = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='CreateTensorboardTimeSeriesRequest', - ) - - -class BatchCreateTensorboardTimeSeriesResponse(proto.Message): - r"""Response message for - [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.BatchCreateTensorboardTimeSeries]. - - Attributes: - tensorboard_time_series (Sequence[google.cloud.aiplatform_v1.types.TensorboardTimeSeries]): - The created TensorboardTimeSeries. - """ - - tensorboard_time_series = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_tensorboard_time_series.TensorboardTimeSeries, - ) - - -class CreateTensorboardTimeSeriesRequest(proto.Message): - r"""Request message for - [TensorboardService.CreateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.CreateTensorboardTimeSeries]. 
- - Attributes: - parent (str): - Required. The resource name of the TensorboardRun to create - the TensorboardTimeSeries in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - tensorboard_time_series_id (str): - Optional. The user specified unique ID to use for the - TensorboardTimeSeries, which will become the final component - of the TensorboardTimeSeries's resource name. This value - should match "[a-z0-9][a-z0-9-]{0, 127}". - tensorboard_time_series (google.cloud.aiplatform_v1.types.TensorboardTimeSeries): - Required. The TensorboardTimeSeries to - create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - tensorboard_time_series_id = proto.Field( - proto.STRING, - number=3, - ) - tensorboard_time_series = proto.Field( - proto.MESSAGE, - number=2, - message=gca_tensorboard_time_series.TensorboardTimeSeries, - ) - - -class GetTensorboardTimeSeriesRequest(proto.Message): - r"""Request message for - [TensorboardService.GetTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.GetTensorboardTimeSeries]. - - Attributes: - name (str): - Required. The name of the TensorboardTimeSeries resource. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListTensorboardTimeSeriesRequest(proto.Message): - r"""Request message for - [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardTimeSeries]. - - Attributes: - parent (str): - Required. The resource name of the - TensorboardRun to list TensorboardTimeSeries. - Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}' - filter (str): - Lists the TensorboardTimeSeries that match - the filter expression. 
- page_size (int): - The maximum number of TensorboardTimeSeries - to return. The service may return fewer than - this value. If unspecified, at most 50 - TensorboardTimeSeries will be returned. The - maximum value is 1000; values above 1000 will be - coerced to 1000. - page_token (str): - A page token, received from a previous - [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardTimeSeries] - call. Provide this to retrieve the subsequent page. - - When paginating, all other parameters provided to - [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardTimeSeries] - must match the call that provided the page token. - order_by (str): - Field to use to sort the list. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - order_by = proto.Field( - proto.STRING, - number=5, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=6, - message=field_mask_pb2.FieldMask, - ) - - -class ListTensorboardTimeSeriesResponse(proto.Message): - r"""Response message for - [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.ListTensorboardTimeSeries]. - - Attributes: - tensorboard_time_series (Sequence[google.cloud.aiplatform_v1.types.TensorboardTimeSeries]): - The TensorboardTimeSeries mathching the - request. - next_page_token (str): - A token, which can be sent as - [ListTensorboardTimeSeriesRequest.page_token][google.cloud.aiplatform.v1.ListTensorboardTimeSeriesRequest.page_token] - to retrieve the next page. If this field is omitted, there - are no subsequent pages. 
- """ - - @property - def raw_page(self): - return self - - tensorboard_time_series = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_tensorboard_time_series.TensorboardTimeSeries, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateTensorboardTimeSeriesRequest(proto.Message): - r"""Request message for - [TensorboardService.UpdateTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.UpdateTensorboardTimeSeries]. - - Attributes: - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. Field mask is used to specify the fields to be - overwritten in the TensorboardTimeSeries resource by the - update. The fields specified in the update_mask are relative - to the resource, not the full request. A field will be - overwritten if it is in the mask. If the user does not - provide a mask then all fields will be overwritten if new - values are specified. - tensorboard_time_series (google.cloud.aiplatform_v1.types.TensorboardTimeSeries): - Required. The TensorboardTimeSeries' ``name`` field is used - to identify the TensorboardTimeSeries to be updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - """ - - update_mask = proto.Field( - proto.MESSAGE, - number=1, - message=field_mask_pb2.FieldMask, - ) - tensorboard_time_series = proto.Field( - proto.MESSAGE, - number=2, - message=gca_tensorboard_time_series.TensorboardTimeSeries, - ) - - -class DeleteTensorboardTimeSeriesRequest(proto.Message): - r"""Request message for - [TensorboardService.DeleteTensorboardTimeSeries][google.cloud.aiplatform.v1.TensorboardService.DeleteTensorboardTimeSeries]. - - Attributes: - name (str): - Required. The name of the TensorboardTimeSeries to be - deleted. 
Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class BatchReadTensorboardTimeSeriesDataRequest(proto.Message): - r"""Request message for - [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.BatchReadTensorboardTimeSeriesData]. - - Attributes: - tensorboard (str): - Required. The resource name of the Tensorboard containing - TensorboardTimeSeries to read data from. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}``. - The TensorboardTimeSeries referenced by - [time_series][google.cloud.aiplatform.v1.BatchReadTensorboardTimeSeriesDataRequest.time_series] - must be sub resources of this Tensorboard. - time_series (Sequence[str]): - Required. The resource names of the TensorboardTimeSeries to - read data from. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - """ - - tensorboard = proto.Field( - proto.STRING, - number=1, - ) - time_series = proto.RepeatedField( - proto.STRING, - number=2, - ) - - -class BatchReadTensorboardTimeSeriesDataResponse(proto.Message): - r"""Response message for - [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.BatchReadTensorboardTimeSeriesData]. - - Attributes: - time_series_data (Sequence[google.cloud.aiplatform_v1.types.TimeSeriesData]): - The returned time series data. - """ - - time_series_data = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=tensorboard_data.TimeSeriesData, - ) - - -class ReadTensorboardTimeSeriesDataRequest(proto.Message): - r"""Request message for - [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardTimeSeriesData]. 
- - Attributes: - tensorboard_time_series (str): - Required. The resource name of the TensorboardTimeSeries to - read data from. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - max_data_points (int): - The maximum number of TensorboardTimeSeries' - data to return. - This value should be a positive integer. - This value can be set to -1 to return all data. - filter (str): - Reads the TensorboardTimeSeries' data that - match the filter expression. - """ - - tensorboard_time_series = proto.Field( - proto.STRING, - number=1, - ) - max_data_points = proto.Field( - proto.INT32, - number=2, - ) - filter = proto.Field( - proto.STRING, - number=3, - ) - - -class ReadTensorboardTimeSeriesDataResponse(proto.Message): - r"""Response message for - [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ReadTensorboardTimeSeriesData]. - - Attributes: - time_series_data (google.cloud.aiplatform_v1.types.TimeSeriesData): - The returned time series data. - """ - - time_series_data = proto.Field( - proto.MESSAGE, - number=1, - message=tensorboard_data.TimeSeriesData, - ) - - -class WriteTensorboardExperimentDataRequest(proto.Message): - r"""Request message for - [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardExperimentData]. - - Attributes: - tensorboard_experiment (str): - Required. The resource name of the TensorboardExperiment to - write data to. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - write_run_data_requests (Sequence[google.cloud.aiplatform_v1.types.WriteTensorboardRunDataRequest]): - Required. Requests containing per-run - TensorboardTimeSeries data to write. 
- """ - - tensorboard_experiment = proto.Field( - proto.STRING, - number=1, - ) - write_run_data_requests = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='WriteTensorboardRunDataRequest', - ) - - -class WriteTensorboardExperimentDataResponse(proto.Message): - r"""Response message for - [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardExperimentData]. - - """ - - -class WriteTensorboardRunDataRequest(proto.Message): - r"""Request message for - [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardRunData]. - - Attributes: - tensorboard_run (str): - Required. The resource name of the TensorboardRun to write - data to. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - time_series_data (Sequence[google.cloud.aiplatform_v1.types.TimeSeriesData]): - Required. The TensorboardTimeSeries data to - write. Values with in a time series are indexed - by their step value. Repeated writes to the same - step will overwrite the existing value for that - step. - The upper limit of data points per write request - is 5000. - """ - - tensorboard_run = proto.Field( - proto.STRING, - number=1, - ) - time_series_data = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=tensorboard_data.TimeSeriesData, - ) - - -class WriteTensorboardRunDataResponse(proto.Message): - r"""Response message for - [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1.TensorboardService.WriteTensorboardRunData]. - - """ - - -class ExportTensorboardTimeSeriesDataRequest(proto.Message): - r"""Request message for - [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ExportTensorboardTimeSeriesData]. - - Attributes: - tensorboard_time_series (str): - Required. The resource name of the TensorboardTimeSeries to - export data from. 
Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - filter (str): - Exports the TensorboardTimeSeries' data that - match the filter expression. - page_size (int): - The maximum number of data points to return per page. The - default page_size will be 1000. Values must be between 1 and - 10000. Values above 10000 will be coerced to 10000. - page_token (str): - A page token, received from a previous - [TensorboardService.ExportTensorboardTimeSeries][] call. - Provide this to retrieve the subsequent page. - - When paginating, all other parameters provided to - [TensorboardService.ExportTensorboardTimeSeries][] must - match the call that provided the page token. - order_by (str): - Field to use to sort the - TensorboardTimeSeries' data. By default, - TensorboardTimeSeries' data will be returned in - a pseudo random order. - """ - - tensorboard_time_series = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - order_by = proto.Field( - proto.STRING, - number=5, - ) - - -class ExportTensorboardTimeSeriesDataResponse(proto.Message): - r"""Response message for - [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1.TensorboardService.ExportTensorboardTimeSeriesData]. - - Attributes: - time_series_data_points (Sequence[google.cloud.aiplatform_v1.types.TimeSeriesDataPoint]): - The returned time series data points. - next_page_token (str): - A token, which can be sent as - [ExportTensorboardTimeSeriesRequest.page_token][] to - retrieve the next page. If this field is omitted, there are - no subsequent pages. 
- """ - - @property - def raw_page(self): - return self - - time_series_data_points = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=tensorboard_data.TimeSeriesDataPoint, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class CreateTensorboardOperationMetadata(proto.Message): - r"""Details of operations that perform create Tensorboard. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - Operation metadata for Tensorboard. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class UpdateTensorboardOperationMetadata(proto.Message): - r"""Details of operations that perform update Tensorboard. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - Operation metadata for Tensorboard. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_time_series.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_time_series.py deleted file mode 100644 index 0f3ab102cc..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/tensorboard_time_series.py +++ /dev/null @@ -1,153 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'TensorboardTimeSeries', - }, -) - - -class TensorboardTimeSeries(proto.Message): - r"""TensorboardTimeSeries maps to times series produced in - training runs - - Attributes: - name (str): - Output only. Name of the - TensorboardTimeSeries. - display_name (str): - Required. User provided name of this - TensorboardTimeSeries. This value should be - unique among all TensorboardTimeSeries resources - belonging to the same TensorboardRun resource - (parent resource). - description (str): - Description of this TensorboardTimeSeries. - value_type (google.cloud.aiplatform_v1.types.TensorboardTimeSeries.ValueType): - Required. Immutable. Type of - TensorboardTimeSeries value. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - TensorboardTimeSeries was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - TensorboardTimeSeries was last updated. - etag (str): - Used to perform a consistent read-modify- - rite updates. If not set, a blind "overwrite" - update happens. - plugin_name (str): - Immutable. Name of the plugin this time - series pertain to. Such as Scalar, Tensor, Blob - plugin_data (bytes): - Data of the current plugin, with the size - limited to 65KB. - metadata (google.cloud.aiplatform_v1.types.TensorboardTimeSeries.Metadata): - Output only. Scalar, Tensor, or Blob metadata - for this TensorboardTimeSeries. - """ - class ValueType(proto.Enum): - r"""An enum representing the value type of a - TensorboardTimeSeries. - """ - VALUE_TYPE_UNSPECIFIED = 0 - SCALAR = 1 - TENSOR = 2 - BLOB_SEQUENCE = 3 - - class Metadata(proto.Message): - r"""Describes metadata for a TensorboardTimeSeries. 
- - Attributes: - max_step (int): - Output only. Max step index of all data - points within a TensorboardTimeSeries. - max_wall_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Max wall clock timestamp of all - data points within a TensorboardTimeSeries. - max_blob_sequence_length (int): - Output only. The largest blob sequence length (number of - blobs) of all data points in this time series, if its - ValueType is BLOB_SEQUENCE. - """ - - max_step = proto.Field( - proto.INT64, - number=1, - ) - max_wall_time = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - max_blob_sequence_length = proto.Field( - proto.INT64, - number=3, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=3, - ) - value_type = proto.Field( - proto.ENUM, - number=4, - enum=ValueType, - ) - create_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - etag = proto.Field( - proto.STRING, - number=7, - ) - plugin_name = proto.Field( - proto.STRING, - number=8, - ) - plugin_data = proto.Field( - proto.BYTES, - number=9, - ) - metadata = proto.Field( - proto.MESSAGE, - number=10, - message=Metadata, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/training_pipeline.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/training_pipeline.py deleted file mode 100644 index 23e6ae9f85..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/training_pipeline.py +++ /dev/null @@ -1,632 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1.types import io -from google.cloud.aiplatform_v1.types import model -from google.cloud.aiplatform_v1.types import pipeline_state -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'TrainingPipeline', - 'InputDataConfig', - 'FractionSplit', - 'FilterSplit', - 'PredefinedSplit', - 'TimestampSplit', - 'StratifiedSplit', - }, -) - - -class TrainingPipeline(proto.Message): - r"""The TrainingPipeline orchestrates tasks associated with training a - Model. It always executes the training task, and optionally may also - export data from Vertex AI's Dataset which becomes the training - input, [upload][google.cloud.aiplatform.v1.ModelService.UploadModel] - the Model to Vertex AI, and evaluate the Model. - - Attributes: - name (str): - Output only. Resource name of the - TrainingPipeline. - display_name (str): - Required. The user-defined name of this - TrainingPipeline. - input_data_config (google.cloud.aiplatform_v1.types.InputDataConfig): - Specifies Vertex AI owned input data that may be used for - training the Model. 
The TrainingPipeline's - [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition] - should make clear whether this config is used and if there - are any special requirements on how it should be filled. If - nothing about this config is mentioned in the - [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition], - then it should be assumed that the TrainingPipeline does not - depend on this configuration. - training_task_definition (str): - Required. A Google Cloud Storage path to the - YAML file that defines the training task which - is responsible for producing the model artifact, - and may also include additional auxiliary work. - The definition files that can be used here are - found in gs://google-cloud- - aiplatform/schema/trainingjob/definition/. Note: - The URI given on output will be immutable and - probably different, including the URI scheme, - than the one given on input. The output URI will - point to a location where the user only has a - read access. - training_task_inputs (google.protobuf.struct_pb2.Value): - Required. The training task's parameter(s), as specified in - the - [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition]'s - ``inputs``. - training_task_metadata (google.protobuf.struct_pb2.Value): - Output only. The metadata information as specified in the - [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition]'s - ``metadata``. This metadata is an auxiliary runtime and - final information about the training task. While the - pipeline is running this information is populated only at a - best effort basis. Only present if the pipeline's - [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition] - contains ``metadata`` object. 
- model_to_upload (google.cloud.aiplatform_v1.types.Model): - Describes the Model that may be uploaded (via - [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel]) - by this TrainingPipeline. The TrainingPipeline's - [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition] - should make clear whether this Model description should be - populated, and if there are any special requirements - regarding how it should be filled. If nothing is mentioned - in the - [training_task_definition][google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition], - then it should be assumed that this field should not be - filled and the training task either uploads the Model - without a need of this information, or that training task - does not support uploading a Model as part of the pipeline. - When the Pipeline's state becomes - ``PIPELINE_STATE_SUCCEEDED`` and the trained Model had been - uploaded into Vertex AI, then the model_to_upload's resource - [name][google.cloud.aiplatform.v1.Model.name] is populated. - The Model is always uploaded into the Project and Location - in which this pipeline is. - state (google.cloud.aiplatform_v1.types.PipelineState): - Output only. The detailed state of the - pipeline. - error (google.rpc.status_pb2.Status): - Output only. Only populated when the pipeline's state is - ``PIPELINE_STATE_FAILED`` or ``PIPELINE_STATE_CANCELLED``. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the TrainingPipeline - was created. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the TrainingPipeline for the first - time entered the ``PIPELINE_STATE_RUNNING`` state. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the TrainingPipeline entered any of - the following states: ``PIPELINE_STATE_SUCCEEDED``, - ``PIPELINE_STATE_FAILED``, ``PIPELINE_STATE_CANCELLED``. 
- update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the TrainingPipeline - was most recently updated. - labels (Sequence[google.cloud.aiplatform_v1.types.TrainingPipeline.LabelsEntry]): - The labels with user-defined metadata to - organize TrainingPipelines. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): - Customer-managed encryption key spec for a TrainingPipeline. - If set, this TrainingPipeline will be secured by this key. - - Note: Model trained by this TrainingPipeline is also secured - by this key if - [model_to_upload][google.cloud.aiplatform.v1.TrainingPipeline.encryption_spec] - is not set separately. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - input_data_config = proto.Field( - proto.MESSAGE, - number=3, - message='InputDataConfig', - ) - training_task_definition = proto.Field( - proto.STRING, - number=4, - ) - training_task_inputs = proto.Field( - proto.MESSAGE, - number=5, - message=struct_pb2.Value, - ) - training_task_metadata = proto.Field( - proto.MESSAGE, - number=6, - message=struct_pb2.Value, - ) - model_to_upload = proto.Field( - proto.MESSAGE, - number=7, - message=model.Model, - ) - state = proto.Field( - proto.ENUM, - number=9, - enum=pipeline_state.PipelineState, - ) - error = proto.Field( - proto.MESSAGE, - number=10, - message=status_pb2.Status, - ) - create_time = proto.Field( - proto.MESSAGE, - number=11, - message=timestamp_pb2.Timestamp, - ) - start_time = proto.Field( - proto.MESSAGE, - number=12, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=13, - message=timestamp_pb2.Timestamp, 
- ) - update_time = proto.Field( - proto.MESSAGE, - number=14, - message=timestamp_pb2.Timestamp, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=15, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=18, - message=gca_encryption_spec.EncryptionSpec, - ) - - -class InputDataConfig(proto.Message): - r"""Specifies Vertex AI owned input data to be used for training, - and possibly evaluating, the Model. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - fraction_split (google.cloud.aiplatform_v1.types.FractionSplit): - Split based on fractions defining the size of - each set. - - This field is a member of `oneof`_ ``split``. - filter_split (google.cloud.aiplatform_v1.types.FilterSplit): - Split based on the provided filters for each - set. - - This field is a member of `oneof`_ ``split``. - predefined_split (google.cloud.aiplatform_v1.types.PredefinedSplit): - Supported only for tabular Datasets. - Split based on a predefined key. - - This field is a member of `oneof`_ ``split``. - timestamp_split (google.cloud.aiplatform_v1.types.TimestampSplit): - Supported only for tabular Datasets. - Split based on the timestamp of the input data - pieces. - - This field is a member of `oneof`_ ``split``. - stratified_split (google.cloud.aiplatform_v1.types.StratifiedSplit): - Supported only for tabular Datasets. - Split based on the distribution of the specified - column. - - This field is a member of `oneof`_ ``split``. - gcs_destination (google.cloud.aiplatform_v1.types.GcsDestination): - The Cloud Storage location where the training data is to be - written to. 
In the given directory a new directory is - created with name: - ``dataset---`` - where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 - format. All training input data is written into that - directory. - - The Vertex AI environment variables representing Cloud - Storage data URIs are represented in the Cloud Storage - wildcard format to support sharded data. e.g.: - "gs://.../training-*.jsonl" - - - AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for - tabular data - - - AIP_TRAINING_DATA_URI = - "gcs_destination/dataset---/training-*.${AIP_DATA_FORMAT}" - - - AIP_VALIDATION_DATA_URI = - "gcs_destination/dataset---/validation-*.${AIP_DATA_FORMAT}" - - - AIP_TEST_DATA_URI = - "gcs_destination/dataset---/test-*.${AIP_DATA_FORMAT}". - - This field is a member of `oneof`_ ``destination``. - bigquery_destination (google.cloud.aiplatform_v1.types.BigQueryDestination): - Only applicable to custom training with tabular Dataset with - BigQuery source. - - The BigQuery project location where the training data is to - be written to. In the given project a new dataset is created - with name - ``dataset___`` - where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All - training input data is written into that dataset. In the - dataset three tables are created, ``training``, - ``validation`` and ``test``. - - - AIP_DATA_FORMAT = "bigquery". - - - AIP_TRAINING_DATA_URI = - "bigquery_destination.dataset\_\ **\ .training" - - - AIP_VALIDATION_DATA_URI = - "bigquery_destination.dataset\_\ **\ .validation" - - - AIP_TEST_DATA_URI = - "bigquery_destination.dataset\_\ **\ .test". - - This field is a member of `oneof`_ ``destination``. - dataset_id (str): - Required. The ID of the Dataset in the same Project and - Location which data will be used to train the Model. 
The - Dataset must use schema compatible with Model being trained, - and what is compatible should be described in the used - TrainingPipeline's [training_task_definition] - [google.cloud.aiplatform.v1.TrainingPipeline.training_task_definition]. - For tabular Datasets, all their data is exported to - training, to pick and choose from. - annotations_filter (str): - Applicable only to Datasets that have DataItems and - Annotations. - - A filter on Annotations of the Dataset. Only Annotations - that both match this filter and belong to DataItems not - ignored by the split method are used in respectively - training, validation or test role, depending on the role of - the DataItem they are on (for the auto-assigned that role is - decided by Vertex AI). A filter with same syntax as the one - used in - [ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations] - may be used, but note here it filters across all Annotations - of the Dataset, and not just within a single DataItem. - annotation_schema_uri (str): - Applicable only to custom training with Datasets that have - DataItems and Annotations. - - Cloud Storage URI that points to a YAML file describing the - annotation schema. The schema is defined as an OpenAPI 3.0.2 - `Schema - Object `__. - The schema files that can be used here are found in - gs://google-cloud-aiplatform/schema/dataset/annotation/ , - note that the chosen schema must be consistent with - [metadata][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri] - of the Dataset specified by - [dataset_id][google.cloud.aiplatform.v1.InputDataConfig.dataset_id]. - - Only Annotations that both match this schema and belong to - DataItems not ignored by the split method are used in - respectively training, validation or test role, depending on - the role of the DataItem they are on. 
- - When used in conjunction with - [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter], - the Annotations used for training are filtered by both - [annotations_filter][google.cloud.aiplatform.v1.InputDataConfig.annotations_filter] - and - [annotation_schema_uri][google.cloud.aiplatform.v1.InputDataConfig.annotation_schema_uri]. - """ - - fraction_split = proto.Field( - proto.MESSAGE, - number=2, - oneof='split', - message='FractionSplit', - ) - filter_split = proto.Field( - proto.MESSAGE, - number=3, - oneof='split', - message='FilterSplit', - ) - predefined_split = proto.Field( - proto.MESSAGE, - number=4, - oneof='split', - message='PredefinedSplit', - ) - timestamp_split = proto.Field( - proto.MESSAGE, - number=5, - oneof='split', - message='TimestampSplit', - ) - stratified_split = proto.Field( - proto.MESSAGE, - number=12, - oneof='split', - message='StratifiedSplit', - ) - gcs_destination = proto.Field( - proto.MESSAGE, - number=8, - oneof='destination', - message=io.GcsDestination, - ) - bigquery_destination = proto.Field( - proto.MESSAGE, - number=10, - oneof='destination', - message=io.BigQueryDestination, - ) - dataset_id = proto.Field( - proto.STRING, - number=1, - ) - annotations_filter = proto.Field( - proto.STRING, - number=6, - ) - annotation_schema_uri = proto.Field( - proto.STRING, - number=9, - ) - - -class FractionSplit(proto.Message): - r"""Assigns the input data to training, validation, and test sets as per - the given fractions. Any of ``training_fraction``, - ``validation_fraction`` and ``test_fraction`` may optionally be - provided, they must sum to up to 1. If the provided ones sum to less - than 1, the remainder is assigned to sets as decided by Vertex AI. - If none of the fractions are set, by default roughly 80% of data is - used for training, 10% for validation, and 10% for test. - - Attributes: - training_fraction (float): - The fraction of the input data that is to be - used to train the Model. 
- validation_fraction (float): - The fraction of the input data that is to be - used to validate the Model. - test_fraction (float): - The fraction of the input data that is to be - used to evaluate the Model. - """ - - training_fraction = proto.Field( - proto.DOUBLE, - number=1, - ) - validation_fraction = proto.Field( - proto.DOUBLE, - number=2, - ) - test_fraction = proto.Field( - proto.DOUBLE, - number=3, - ) - - -class FilterSplit(proto.Message): - r"""Assigns input data to training, validation, and test sets - based on the given filters, data pieces not matched by any - filter are ignored. Currently only supported for Datasets - containing DataItems. - If any of the filters in this message are to match nothing, then - they can be set as '-' (the minus sign). - - Supported only for unstructured Datasets. - - Attributes: - training_filter (str): - Required. A filter on DataItems of the Dataset. DataItems - that match this filter are used to train the Model. A filter - with same syntax as the one used in - [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems] - may be used. If a single DataItem is matched by more than - one of the FilterSplit filters, then it is assigned to the - first set that applies to it in the training, validation, - test order. - validation_filter (str): - Required. A filter on DataItems of the Dataset. DataItems - that match this filter are used to validate the Model. A - filter with same syntax as the one used in - [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems] - may be used. If a single DataItem is matched by more than - one of the FilterSplit filters, then it is assigned to the - first set that applies to it in the training, validation, - test order. - test_filter (str): - Required. A filter on DataItems of the Dataset. DataItems - that match this filter are used to test the Model. 
A filter - with same syntax as the one used in - [DatasetService.ListDataItems][google.cloud.aiplatform.v1.DatasetService.ListDataItems] - may be used. If a single DataItem is matched by more than - one of the FilterSplit filters, then it is assigned to the - first set that applies to it in the training, validation, - test order. - """ - - training_filter = proto.Field( - proto.STRING, - number=1, - ) - validation_filter = proto.Field( - proto.STRING, - number=2, - ) - test_filter = proto.Field( - proto.STRING, - number=3, - ) - - -class PredefinedSplit(proto.Message): - r"""Assigns input data to training, validation, and test sets - based on the value of a provided key. - - Supported only for tabular Datasets. - - Attributes: - key (str): - Required. The key is a name of one of the Dataset's data - columns. The value of the key (either the label's value or - value in the column) must be one of {``training``, - ``validation``, ``test``}, and it defines to which set the - given piece of data is assigned. If for a piece of data the - key is not present or has an invalid value, that piece is - ignored by the pipeline. - """ - - key = proto.Field( - proto.STRING, - number=1, - ) - - -class TimestampSplit(proto.Message): - r"""Assigns input data to training, validation, and test sets - based on a provided timestamps. The youngest data pieces are - assigned to training set, next to validation set, and the oldest - to the test set. - Supported only for tabular Datasets. - - Attributes: - training_fraction (float): - The fraction of the input data that is to be - used to train the Model. - validation_fraction (float): - The fraction of the input data that is to be - used to validate the Model. - test_fraction (float): - The fraction of the input data that is to be - used to evaluate the Model. - key (str): - Required. The key is a name of one of the Dataset's data - columns. 
The values of the key (the values in the column) - must be in RFC 3339 ``date-time`` format, where - ``time-offset`` = ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z). If - for a piece of data the key is not present or has an invalid - value, that piece is ignored by the pipeline. - """ - - training_fraction = proto.Field( - proto.DOUBLE, - number=1, - ) - validation_fraction = proto.Field( - proto.DOUBLE, - number=2, - ) - test_fraction = proto.Field( - proto.DOUBLE, - number=3, - ) - key = proto.Field( - proto.STRING, - number=4, - ) - - -class StratifiedSplit(proto.Message): - r"""Assigns input data to the training, validation, and test sets so - that the distribution of values found in the categorical column (as - specified by the ``key`` field) is mirrored within each split. The - fraction values determine the relative sizes of the splits. - - For example, if the specified column has three values, with 50% of - the rows having value "A", 25% value "B", and 25% value "C", and the - split fractions are specified as 80/10/10, then the training set - will constitute 80% of the training data, with about 50% of the - training set rows having the value "A" for the specified column, - about 25% having the value "B", and about 25% having the value "C". - - Only the top 500 occurring values are used; any values not in the - top 500 values are randomly assigned to a split. If less than three - rows contain a specific value, those rows are randomly assigned. - - Supported only for tabular Datasets. - - Attributes: - training_fraction (float): - The fraction of the input data that is to be - used to train the Model. - validation_fraction (float): - The fraction of the input data that is to be - used to validate the Model. - test_fraction (float): - The fraction of the input data that is to be - used to evaluate the Model. - key (str): - Required. The key is a name of one of the - Dataset's data columns. The key provided must be - for a categorical column. 
- """ - - training_fraction = proto.Field( - proto.DOUBLE, - number=1, - ) - validation_fraction = proto.Field( - proto.DOUBLE, - number=2, - ) - test_fraction = proto.Field( - proto.DOUBLE, - number=3, - ) - key = proto.Field( - proto.STRING, - number=4, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/types.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/types.py deleted file mode 100644 index f85b6e105c..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/types.py +++ /dev/null @@ -1,86 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'BoolArray', - 'DoubleArray', - 'Int64Array', - 'StringArray', - }, -) - - -class BoolArray(proto.Message): - r"""A list of boolean values. - - Attributes: - values (Sequence[bool]): - A list of bool values. - """ - - values = proto.RepeatedField( - proto.BOOL, - number=1, - ) - - -class DoubleArray(proto.Message): - r"""A list of double values. - - Attributes: - values (Sequence[float]): - A list of bool values. - """ - - values = proto.RepeatedField( - proto.DOUBLE, - number=1, - ) - - -class Int64Array(proto.Message): - r"""A list of int64 values. - - Attributes: - values (Sequence[int]): - A list of int64 values. 
- """ - - values = proto.RepeatedField( - proto.INT64, - number=1, - ) - - -class StringArray(proto.Message): - r"""A list of string values. - - Attributes: - values (Sequence[str]): - A list of string values. - """ - - values = proto.RepeatedField( - proto.STRING, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/user_action_reference.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/user_action_reference.py deleted file mode 100644 index b70f6840e4..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/user_action_reference.py +++ /dev/null @@ -1,75 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'UserActionReference', - }, -) - - -class UserActionReference(proto.Message): - r"""References an API call. It contains more information about - long running operation and Jobs that are triggered by the API - call. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - operation (str): - For API calls that return a long running - operation. Resource name of the long running - operation. Format: - 'projects/{project}/locations/{location}/operations/{operation}' - - This field is a member of `oneof`_ ``reference``. - data_labeling_job (str): - For API calls that start a LabelingJob. Resource name of the - LabelingJob. Format: - 'projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}' - - This field is a member of `oneof`_ ``reference``. - method (str): - The method name of the API RPC call. For - example, - "/google.cloud.aiplatform.{apiVersion}.DatasetService.CreateDataset". - """ - - operation = proto.Field( - proto.STRING, - number=1, - oneof='reference', - ) - data_labeling_job = proto.Field( - proto.STRING, - number=2, - oneof='reference', - ) - method = proto.Field( - proto.STRING, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/value.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/value.py deleted file mode 100644 index ed8d048e6c..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/value.py +++ /dev/null @@ -1,69 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'Value', - }, -) - - -class Value(proto.Message): - r"""Value is the value of the field. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - int_value (int): - An integer value. - - This field is a member of `oneof`_ ``value``. - double_value (float): - A double value. - - This field is a member of `oneof`_ ``value``. - string_value (str): - A string value. - - This field is a member of `oneof`_ ``value``. - """ - - int_value = proto.Field( - proto.INT64, - number=1, - oneof='value', - ) - double_value = proto.Field( - proto.DOUBLE, - number=2, - oneof='value', - ) - string_value = proto.Field( - proto.STRING, - number=3, - oneof='value', - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/vizier_service.py b/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/vizier_service.py deleted file mode 100644 index 4dc26a91f5..0000000000 --- a/owl-bot-staging/v1/google/cloud/aiplatform_v1/types/vizier_service.py +++ /dev/null @@ -1,589 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1.types import operation -from google.cloud.aiplatform_v1.types import study as gca_study -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1', - manifest={ - 'GetStudyRequest', - 'CreateStudyRequest', - 'ListStudiesRequest', - 'ListStudiesResponse', - 'DeleteStudyRequest', - 'LookupStudyRequest', - 'SuggestTrialsRequest', - 'SuggestTrialsResponse', - 'SuggestTrialsMetadata', - 'CreateTrialRequest', - 'GetTrialRequest', - 'ListTrialsRequest', - 'ListTrialsResponse', - 'AddTrialMeasurementRequest', - 'CompleteTrialRequest', - 'DeleteTrialRequest', - 'CheckTrialEarlyStoppingStateRequest', - 'CheckTrialEarlyStoppingStateResponse', - 'CheckTrialEarlyStoppingStateMetatdata', - 'StopTrialRequest', - 'ListOptimalTrialsRequest', - 'ListOptimalTrialsResponse', - }, -) - - -class GetStudyRequest(proto.Message): - r"""Request message for - [VizierService.GetStudy][google.cloud.aiplatform.v1.VizierService.GetStudy]. - - Attributes: - name (str): - Required. The name of the Study resource. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateStudyRequest(proto.Message): - r"""Request message for - [VizierService.CreateStudy][google.cloud.aiplatform.v1.VizierService.CreateStudy]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - CustomJob in. Format: - ``projects/{project}/locations/{location}`` - study (google.cloud.aiplatform_v1.types.Study): - Required. The Study configuration used to - create the Study. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - study = proto.Field( - proto.MESSAGE, - number=2, - message=gca_study.Study, - ) - - -class ListStudiesRequest(proto.Message): - r"""Request message for - [VizierService.ListStudies][google.cloud.aiplatform.v1.VizierService.ListStudies]. - - Attributes: - parent (str): - Required. The resource name of the Location to list the - Study from. Format: - ``projects/{project}/locations/{location}`` - page_token (str): - Optional. A page token to request the next - page of results. If unspecified, there are no - subsequent pages. - page_size (int): - Optional. The maximum number of studies to - return per "page" of results. If unspecified, - service will pick an appropriate default. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_token = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - - -class ListStudiesResponse(proto.Message): - r"""Response message for - [VizierService.ListStudies][google.cloud.aiplatform.v1.VizierService.ListStudies]. - - Attributes: - studies (Sequence[google.cloud.aiplatform_v1.types.Study]): - The studies associated with the project. - next_page_token (str): - Passes this token as the ``page_token`` field of the request - for a subsequent call. If this field is omitted, there are - no subsequent pages. - """ - - @property - def raw_page(self): - return self - - studies = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_study.Study, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteStudyRequest(proto.Message): - r"""Request message for - [VizierService.DeleteStudy][google.cloud.aiplatform.v1.VizierService.DeleteStudy]. - - Attributes: - name (str): - Required. The name of the Study resource to be deleted. 
- Format: - ``projects/{project}/locations/{location}/studies/{study}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class LookupStudyRequest(proto.Message): - r"""Request message for - [VizierService.LookupStudy][google.cloud.aiplatform.v1.VizierService.LookupStudy]. - - Attributes: - parent (str): - Required. The resource name of the Location to get the Study - from. Format: ``projects/{project}/locations/{location}`` - display_name (str): - Required. The user-defined display name of - the Study - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - - -class SuggestTrialsRequest(proto.Message): - r"""Request message for - [VizierService.SuggestTrials][google.cloud.aiplatform.v1.VizierService.SuggestTrials]. - - Attributes: - parent (str): - Required. The project and location that the Study belongs - to. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - suggestion_count (int): - Required. The number of suggestions - requested. - client_id (str): - Required. The identifier of the client that is requesting - the suggestion. - - If multiple SuggestTrialsRequests have the same - ``client_id``, the service will return the identical - suggested Trial if the Trial is pending, and provide a new - Trial if the last suggested Trial was completed. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - suggestion_count = proto.Field( - proto.INT32, - number=2, - ) - client_id = proto.Field( - proto.STRING, - number=3, - ) - - -class SuggestTrialsResponse(proto.Message): - r"""Response message for - [VizierService.SuggestTrials][google.cloud.aiplatform.v1.VizierService.SuggestTrials]. - - Attributes: - trials (Sequence[google.cloud.aiplatform_v1.types.Trial]): - A list of Trials. - study_state (google.cloud.aiplatform_v1.types.Study.State): - The state of the Study. 
- start_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the operation was started. - end_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which operation processing - completed. - """ - - trials = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_study.Trial, - ) - study_state = proto.Field( - proto.ENUM, - number=2, - enum=gca_study.Study.State, - ) - start_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - - -class SuggestTrialsMetadata(proto.Message): - r"""Details of operations that perform Trials suggestion. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - Operation metadata for suggesting Trials. - client_id (str): - The identifier of the client that is requesting the - suggestion. - - If multiple SuggestTrialsRequests have the same - ``client_id``, the service will return the identical - suggested Trial if the Trial is pending, and provide a new - Trial if the last suggested Trial was completed. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - client_id = proto.Field( - proto.STRING, - number=2, - ) - - -class CreateTrialRequest(proto.Message): - r"""Request message for - [VizierService.CreateTrial][google.cloud.aiplatform.v1.VizierService.CreateTrial]. - - Attributes: - parent (str): - Required. The resource name of the Study to create the Trial - in. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - trial (google.cloud.aiplatform_v1.types.Trial): - Required. The Trial to create. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - trial = proto.Field( - proto.MESSAGE, - number=2, - message=gca_study.Trial, - ) - - -class GetTrialRequest(proto.Message): - r"""Request message for - [VizierService.GetTrial][google.cloud.aiplatform.v1.VizierService.GetTrial]. - - Attributes: - name (str): - Required. The name of the Trial resource. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListTrialsRequest(proto.Message): - r"""Request message for - [VizierService.ListTrials][google.cloud.aiplatform.v1.VizierService.ListTrials]. - - Attributes: - parent (str): - Required. The resource name of the Study to list the Trial - from. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - page_token (str): - Optional. A page token to request the next - page of results. If unspecified, there are no - subsequent pages. - page_size (int): - Optional. The number of Trials to retrieve - per "page" of results. If unspecified, the - service will pick an appropriate default. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_token = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - - -class ListTrialsResponse(proto.Message): - r"""Response message for - [VizierService.ListTrials][google.cloud.aiplatform.v1.VizierService.ListTrials]. - - Attributes: - trials (Sequence[google.cloud.aiplatform_v1.types.Trial]): - The Trials associated with the Study. - next_page_token (str): - Pass this token as the ``page_token`` field of the request - for a subsequent call. If this field is omitted, there are - no subsequent pages. 
- """ - - @property - def raw_page(self): - return self - - trials = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_study.Trial, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class AddTrialMeasurementRequest(proto.Message): - r"""Request message for - [VizierService.AddTrialMeasurement][google.cloud.aiplatform.v1.VizierService.AddTrialMeasurement]. - - Attributes: - trial_name (str): - Required. The name of the trial to add measurement. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - measurement (google.cloud.aiplatform_v1.types.Measurement): - Required. The measurement to be added to a - Trial. - """ - - trial_name = proto.Field( - proto.STRING, - number=1, - ) - measurement = proto.Field( - proto.MESSAGE, - number=3, - message=gca_study.Measurement, - ) - - -class CompleteTrialRequest(proto.Message): - r"""Request message for - [VizierService.CompleteTrial][google.cloud.aiplatform.v1.VizierService.CompleteTrial]. - - Attributes: - name (str): - Required. The Trial's name. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - final_measurement (google.cloud.aiplatform_v1.types.Measurement): - Optional. If provided, it will be used as the completed - Trial's final_measurement; Otherwise, the service will - auto-select a previously reported measurement as the - final-measurement - trial_infeasible (bool): - Optional. True if the Trial cannot be run with the given - Parameter, and final_measurement will be ignored. - infeasible_reason (str): - Optional. A human readable reason why the trial was - infeasible. This should only be provided if - ``trial_infeasible`` is true. 
- """ - - name = proto.Field( - proto.STRING, - number=1, - ) - final_measurement = proto.Field( - proto.MESSAGE, - number=2, - message=gca_study.Measurement, - ) - trial_infeasible = proto.Field( - proto.BOOL, - number=3, - ) - infeasible_reason = proto.Field( - proto.STRING, - number=4, - ) - - -class DeleteTrialRequest(proto.Message): - r"""Request message for - [VizierService.DeleteTrial][google.cloud.aiplatform.v1.VizierService.DeleteTrial]. - - Attributes: - name (str): - Required. The Trial's name. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CheckTrialEarlyStoppingStateRequest(proto.Message): - r"""Request message for - [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1.VizierService.CheckTrialEarlyStoppingState]. - - Attributes: - trial_name (str): - Required. The Trial's name. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - """ - - trial_name = proto.Field( - proto.STRING, - number=1, - ) - - -class CheckTrialEarlyStoppingStateResponse(proto.Message): - r"""Response message for - [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1.VizierService.CheckTrialEarlyStoppingState]. - - Attributes: - should_stop (bool): - True if the Trial should stop. - """ - - should_stop = proto.Field( - proto.BOOL, - number=1, - ) - - -class CheckTrialEarlyStoppingStateMetatdata(proto.Message): - r"""This message will be placed in the metadata field of a - google.longrunning.Operation associated with a - CheckTrialEarlyStoppingState request. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): - Operation metadata for suggesting Trials. - study (str): - The name of the Study that the Trial belongs - to. - trial (str): - The Trial name. 
- """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - study = proto.Field( - proto.STRING, - number=2, - ) - trial = proto.Field( - proto.STRING, - number=3, - ) - - -class StopTrialRequest(proto.Message): - r"""Request message for - [VizierService.StopTrial][google.cloud.aiplatform.v1.VizierService.StopTrial]. - - Attributes: - name (str): - Required. The Trial's name. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListOptimalTrialsRequest(proto.Message): - r"""Request message for - [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1.VizierService.ListOptimalTrials]. - - Attributes: - parent (str): - Required. The name of the Study that the - optimal Trial belongs to. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - - -class ListOptimalTrialsResponse(proto.Message): - r"""Response message for - [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1.VizierService.ListOptimalTrials]. - - Attributes: - optimal_trials (Sequence[google.cloud.aiplatform_v1.types.Trial]): - The pareto-optimal Trials for multiple objective Study or - the optimal trial for single objective Study. The definition - of pareto-optimal can be checked in wiki page. 
- https://en.wikipedia.org/wiki/Pareto_efficiency - """ - - optimal_trials = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_study.Trial, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/mypy.ini b/owl-bot-staging/v1/mypy.ini deleted file mode 100644 index 4505b48543..0000000000 --- a/owl-bot-staging/v1/mypy.ini +++ /dev/null @@ -1,3 +0,0 @@ -[mypy] -python_version = 3.6 -namespace_packages = True diff --git a/owl-bot-staging/v1/noxfile.py b/owl-bot-staging/v1/noxfile.py deleted file mode 100644 index ed10fbc787..0000000000 --- a/owl-bot-staging/v1/noxfile.py +++ /dev/null @@ -1,132 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import pathlib -import shutil -import subprocess -import sys - - -import nox # type: ignore - -CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() - -LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" -PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") - - -nox.sessions = [ - "unit", - "cover", - "mypy", - "check_lower_bounds" - # exclude update_lower_bounds from default - "docs", -] - -@nox.session(python=['3.6', '3.7', '3.8', '3.9', '3.10']) -def unit(session): - """Run the unit test suite.""" - - session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio') - session.install('-e', '.') - - session.run( - 'py.test', - '--quiet', - '--cov=google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/', - '--cov-config=.coveragerc', - '--cov-report=term', - '--cov-report=html', - os.path.join('tests', 'unit', ''.join(session.posargs)) - ) - - -@nox.session(python='3.9') -def cover(session): - """Run the final coverage report. - This outputs the coverage report aggregating coverage from the unit - test runs (not system test runs), and then erases coverage data. 
- """ - session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=100") - - session.run("coverage", "erase") - - -@nox.session(python=['3.6', '3.7', '3.8', '3.9']) -def mypy(session): - """Run the type checker.""" - session.install('mypy', 'types-pkg_resources') - session.install('.') - session.run( - 'mypy', - '--explicit-package-bases', - 'google', - ) - - -@nox.session -def update_lower_bounds(session): - """Update lower bounds in constraints.txt to match setup.py""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'update', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - - -@nox.session -def check_lower_bounds(session): - """Check lower bounds in setup.py are reflected in constraints file""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'check', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - -@nox.session(python='3.9') -def docs(session): - """Build the docs for this library.""" - - session.install("-e", ".") - session.install("sphinx<3.0.0", "alabaster", "recommonmark") - - shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) - session.run( - "sphinx-build", - "-W", # warnings as errors - "-T", # show full traceback on exception - "-N", # no colors - "-b", - "html", - "-d", - os.path.join("docs", "_build", "doctrees", ""), - os.path.join("docs", ""), - os.path.join("docs", "_build", "html", ""), - ) diff --git a/owl-bot-staging/v1/scripts/fixup_aiplatform_v1_keywords.py b/owl-bot-staging/v1/scripts/fixup_aiplatform_v1_keywords.py deleted file mode 100644 index be8731d99a..0000000000 --- a/owl-bot-staging/v1/scripts/fixup_aiplatform_v1_keywords.py +++ /dev/null @@ -1,359 +0,0 @@ -#! 
/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class aiplatformCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - 'add_context_artifacts_and_executions': ('context', 'artifacts', 'executions', ), - 'add_context_children': ('context', 'child_contexts', ), - 'add_execution_events': ('execution', 'events', ), - 'add_trial_measurement': ('trial_name', 'measurement', ), - 'batch_create_features': ('parent', 'requests', ), - 'batch_create_tensorboard_runs': ('parent', 'requests', ), - 'batch_create_tensorboard_time_series': ('parent', 'requests', ), - 'batch_migrate_resources': ('parent', 'migrate_resource_requests', ), - 'batch_read_feature_values': ('featurestore', 'destination', 'entity_type_specs', 'csv_read_instances', 'bigquery_read_instances', 'pass_through_fields', ), - 'batch_read_tensorboard_time_series_data': ('tensorboard', 'time_series', ), - 
'cancel_batch_prediction_job': ('name', ), - 'cancel_custom_job': ('name', ), - 'cancel_data_labeling_job': ('name', ), - 'cancel_hyperparameter_tuning_job': ('name', ), - 'cancel_pipeline_job': ('name', ), - 'cancel_training_pipeline': ('name', ), - 'check_trial_early_stopping_state': ('trial_name', ), - 'complete_trial': ('name', 'final_measurement', 'trial_infeasible', 'infeasible_reason', ), - 'create_artifact': ('parent', 'artifact', 'artifact_id', ), - 'create_batch_prediction_job': ('parent', 'batch_prediction_job', ), - 'create_context': ('parent', 'context', 'context_id', ), - 'create_custom_job': ('parent', 'custom_job', ), - 'create_data_labeling_job': ('parent', 'data_labeling_job', ), - 'create_dataset': ('parent', 'dataset', ), - 'create_endpoint': ('parent', 'endpoint', 'endpoint_id', ), - 'create_entity_type': ('parent', 'entity_type_id', 'entity_type', ), - 'create_execution': ('parent', 'execution', 'execution_id', ), - 'create_feature': ('parent', 'feature', 'feature_id', ), - 'create_featurestore': ('parent', 'featurestore', 'featurestore_id', ), - 'create_hyperparameter_tuning_job': ('parent', 'hyperparameter_tuning_job', ), - 'create_index': ('parent', 'index', ), - 'create_index_endpoint': ('parent', 'index_endpoint', ), - 'create_metadata_schema': ('parent', 'metadata_schema', 'metadata_schema_id', ), - 'create_metadata_store': ('parent', 'metadata_store', 'metadata_store_id', ), - 'create_model_deployment_monitoring_job': ('parent', 'model_deployment_monitoring_job', ), - 'create_pipeline_job': ('parent', 'pipeline_job', 'pipeline_job_id', ), - 'create_specialist_pool': ('parent', 'specialist_pool', ), - 'create_study': ('parent', 'study', ), - 'create_tensorboard': ('parent', 'tensorboard', ), - 'create_tensorboard_experiment': ('parent', 'tensorboard_experiment_id', 'tensorboard_experiment', ), - 'create_tensorboard_run': ('parent', 'tensorboard_run', 'tensorboard_run_id', ), - 'create_tensorboard_time_series': ('parent', 
'tensorboard_time_series', 'tensorboard_time_series_id', ), - 'create_training_pipeline': ('parent', 'training_pipeline', ), - 'create_trial': ('parent', 'trial', ), - 'delete_artifact': ('name', 'etag', ), - 'delete_batch_prediction_job': ('name', ), - 'delete_context': ('name', 'force', 'etag', ), - 'delete_custom_job': ('name', ), - 'delete_data_labeling_job': ('name', ), - 'delete_dataset': ('name', ), - 'delete_endpoint': ('name', ), - 'delete_entity_type': ('name', 'force', ), - 'delete_execution': ('name', 'etag', ), - 'delete_feature': ('name', ), - 'delete_featurestore': ('name', 'force', ), - 'delete_hyperparameter_tuning_job': ('name', ), - 'delete_index': ('name', ), - 'delete_index_endpoint': ('name', ), - 'delete_metadata_store': ('name', 'force', ), - 'delete_model': ('name', ), - 'delete_model_deployment_monitoring_job': ('name', ), - 'delete_pipeline_job': ('name', ), - 'delete_specialist_pool': ('name', 'force', ), - 'delete_study': ('name', ), - 'delete_tensorboard': ('name', ), - 'delete_tensorboard_experiment': ('name', ), - 'delete_tensorboard_run': ('name', ), - 'delete_tensorboard_time_series': ('name', ), - 'delete_training_pipeline': ('name', ), - 'delete_trial': ('name', ), - 'deploy_index': ('index_endpoint', 'deployed_index', ), - 'deploy_model': ('endpoint', 'deployed_model', 'traffic_split', ), - 'explain': ('endpoint', 'instances', 'parameters', 'explanation_spec_override', 'deployed_model_id', ), - 'export_data': ('name', 'export_config', ), - 'export_feature_values': ('entity_type', 'destination', 'feature_selector', 'snapshot_export', 'full_export', 'settings', ), - 'export_model': ('name', 'output_config', ), - 'export_tensorboard_time_series_data': ('tensorboard_time_series', 'filter', 'page_size', 'page_token', 'order_by', ), - 'get_annotation_spec': ('name', 'read_mask', ), - 'get_artifact': ('name', ), - 'get_batch_prediction_job': ('name', ), - 'get_context': ('name', ), - 'get_custom_job': ('name', ), - 
'get_data_labeling_job': ('name', ), - 'get_dataset': ('name', 'read_mask', ), - 'get_endpoint': ('name', ), - 'get_entity_type': ('name', ), - 'get_execution': ('name', ), - 'get_feature': ('name', ), - 'get_featurestore': ('name', ), - 'get_hyperparameter_tuning_job': ('name', ), - 'get_index': ('name', ), - 'get_index_endpoint': ('name', ), - 'get_metadata_schema': ('name', ), - 'get_metadata_store': ('name', ), - 'get_model': ('name', ), - 'get_model_deployment_monitoring_job': ('name', ), - 'get_model_evaluation': ('name', ), - 'get_model_evaluation_slice': ('name', ), - 'get_pipeline_job': ('name', ), - 'get_specialist_pool': ('name', ), - 'get_study': ('name', ), - 'get_tensorboard': ('name', ), - 'get_tensorboard_experiment': ('name', ), - 'get_tensorboard_run': ('name', ), - 'get_tensorboard_time_series': ('name', ), - 'get_training_pipeline': ('name', ), - 'get_trial': ('name', ), - 'import_data': ('name', 'import_configs', ), - 'import_feature_values': ('entity_type', 'feature_specs', 'avro_source', 'bigquery_source', 'csv_source', 'feature_time_field', 'feature_time', 'entity_id_field', 'disable_online_serving', 'worker_count', ), - 'list_annotations': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), - 'list_artifacts': ('parent', 'page_size', 'page_token', 'filter', ), - 'list_batch_prediction_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_contexts': ('parent', 'page_size', 'page_token', 'filter', ), - 'list_custom_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_data_items': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), - 'list_data_labeling_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), - 'list_datasets': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), - 'list_endpoints': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), - 'list_entity_types': 
('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), - 'list_executions': ('parent', 'page_size', 'page_token', 'filter', ), - 'list_features': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', 'latest_stats_count', ), - 'list_featurestores': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), - 'list_hyperparameter_tuning_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_index_endpoints': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_indexes': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_metadata_schemas': ('parent', 'page_size', 'page_token', 'filter', ), - 'list_metadata_stores': ('parent', 'page_size', 'page_token', ), - 'list_model_deployment_monitoring_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_model_evaluations': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_model_evaluation_slices': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_models': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), - 'list_optimal_trials': ('parent', ), - 'list_pipeline_jobs': ('parent', 'filter', 'page_size', 'page_token', 'order_by', ), - 'list_specialist_pools': ('parent', 'page_size', 'page_token', 'read_mask', ), - 'list_studies': ('parent', 'page_token', 'page_size', ), - 'list_tensorboard_experiments': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), - 'list_tensorboard_runs': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), - 'list_tensorboards': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), - 'list_tensorboard_time_series': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), - 'list_training_pipelines': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_trials': ('parent', 'page_token', 
'page_size', ), - 'lookup_study': ('parent', 'display_name', ), - 'mutate_deployed_index': ('index_endpoint', 'deployed_index', ), - 'pause_model_deployment_monitoring_job': ('name', ), - 'predict': ('endpoint', 'instances', 'parameters', ), - 'purge_artifacts': ('parent', 'filter', 'force', ), - 'purge_contexts': ('parent', 'filter', 'force', ), - 'purge_executions': ('parent', 'filter', 'force', ), - 'query_artifact_lineage_subgraph': ('artifact', 'max_hops', 'filter', ), - 'query_context_lineage_subgraph': ('context', ), - 'query_execution_inputs_and_outputs': ('execution', ), - 'raw_predict': ('endpoint', 'http_body', ), - 'read_feature_values': ('entity_type', 'entity_id', 'feature_selector', ), - 'read_tensorboard_blob_data': ('time_series', 'blob_ids', ), - 'read_tensorboard_time_series_data': ('tensorboard_time_series', 'max_data_points', 'filter', ), - 'resume_model_deployment_monitoring_job': ('name', ), - 'search_features': ('location', 'query', 'page_size', 'page_token', ), - 'search_migratable_resources': ('parent', 'page_size', 'page_token', 'filter', ), - 'search_model_deployment_monitoring_stats_anomalies': ('model_deployment_monitoring_job', 'deployed_model_id', 'objectives', 'feature_display_name', 'page_size', 'page_token', 'start_time', 'end_time', ), - 'stop_trial': ('name', ), - 'streaming_read_feature_values': ('entity_type', 'entity_ids', 'feature_selector', ), - 'suggest_trials': ('parent', 'suggestion_count', 'client_id', ), - 'undeploy_index': ('index_endpoint', 'deployed_index_id', ), - 'undeploy_model': ('endpoint', 'deployed_model_id', 'traffic_split', ), - 'update_artifact': ('artifact', 'update_mask', 'allow_missing', ), - 'update_context': ('context', 'update_mask', 'allow_missing', ), - 'update_dataset': ('dataset', 'update_mask', ), - 'update_endpoint': ('endpoint', 'update_mask', ), - 'update_entity_type': ('entity_type', 'update_mask', ), - 'update_execution': ('execution', 'update_mask', 'allow_missing', ), - 'update_feature': 
('feature', 'update_mask', ), - 'update_featurestore': ('featurestore', 'update_mask', ), - 'update_index': ('index', 'update_mask', ), - 'update_index_endpoint': ('index_endpoint', 'update_mask', ), - 'update_model': ('model', 'update_mask', ), - 'update_model_deployment_monitoring_job': ('model_deployment_monitoring_job', 'update_mask', ), - 'update_specialist_pool': ('specialist_pool', 'update_mask', ), - 'update_tensorboard': ('update_mask', 'tensorboard', ), - 'update_tensorboard_experiment': ('update_mask', 'tensorboard_experiment', ), - 'update_tensorboard_run': ('update_mask', 'tensorboard_run', ), - 'update_tensorboard_time_series': ('update_mask', 'tensorboard_time_series', ), - 'upload_model': ('parent', 'model', ), - 'write_tensorboard_experiment_data': ('tensorboard_experiment', 'write_run_data_requests', ), - 'write_tensorboard_run_data': ('tensorboard_run', 'time_series_data', ), - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. 
- return updated - - kwargs, ctrl_kwargs = partition( - lambda a: a.keyword.value not in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=aiplatformCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the aiplatform client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. 
- -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. -""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/scripts/fixup_definition_v1_keywords.py b/owl-bot-staging/v1/scripts/fixup_definition_v1_keywords.py deleted file mode 100644 index 7fb60affb5..0000000000 --- a/owl-bot-staging/v1/scripts/fixup_definition_v1_keywords.py +++ /dev/null @@ -1,175 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class definitionCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. 
- return updated - - kwargs, ctrl_kwargs = partition( - lambda a: a.keyword.value not in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=definitionCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the definition client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. 
- -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. -""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/scripts/fixup_instance_v1_keywords.py b/owl-bot-staging/v1/scripts/fixup_instance_v1_keywords.py deleted file mode 100644 index 52fe576948..0000000000 --- a/owl-bot-staging/v1/scripts/fixup_instance_v1_keywords.py +++ /dev/null @@ -1,175 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class instanceCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. 
- return updated - - kwargs, ctrl_kwargs = partition( - lambda a: a.keyword.value not in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=instanceCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the instance client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. 
- -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. -""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/scripts/fixup_params_v1_keywords.py b/owl-bot-staging/v1/scripts/fixup_params_v1_keywords.py deleted file mode 100644 index 6331faf507..0000000000 --- a/owl-bot-staging/v1/scripts/fixup_params_v1_keywords.py +++ /dev/null @@ -1,175 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class paramsCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. 
- return updated - - kwargs, ctrl_kwargs = partition( - lambda a: a.keyword.value not in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=paramsCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the params client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. 
- -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. -""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/scripts/fixup_prediction_v1_keywords.py b/owl-bot-staging/v1/scripts/fixup_prediction_v1_keywords.py deleted file mode 100644 index df2a884371..0000000000 --- a/owl-bot-staging/v1/scripts/fixup_prediction_v1_keywords.py +++ /dev/null @@ -1,175 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class predictionCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. 
- return updated - - kwargs, ctrl_kwargs = partition( - lambda a: a.keyword.value not in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=predictionCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the prediction client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. 
- -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. -""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/setup.py b/owl-bot-staging/v1/setup.py deleted file mode 100644 index 07bf24ba8d..0000000000 --- a/owl-bot-staging/v1/setup.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import io -import os -import setuptools # type: ignore - -version = '0.1.0' - -package_root = os.path.abspath(os.path.dirname(__file__)) - -readme_filename = os.path.join(package_root, 'README.rst') -with io.open(readme_filename, encoding='utf-8') as readme_file: - readme = readme_file.read() - -setuptools.setup( - name='google-cloud-aiplatform-v1-schema-trainingjob-definition', - version=version, - long_description=readme, - packages=setuptools.PEP420PackageFinder.find(), - namespace_packages=('google', 'google.cloud', 'google.cloud.aiplatform', 'google.cloud.aiplatform.v1', 'google.cloud.aiplatform.v1.schema', 'google.cloud.aiplatform.v1.schema.trainingjob'), - platforms='Posix; MacOS X; Windows', - include_package_data=True, - install_requires=( - 'google-api-core[grpc] >= 1.28.0, < 3.0.0dev', - 'libcst >= 0.2.5', - 'proto-plus >= 1.19.7', - ), - python_requires='>=3.6', - classifiers=[ - 'Development Status :: 3 - Alpha', - 'Intended Audience :: Developers', - 'Operating System :: OS Independent', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Programming Language :: Python :: 3.9', - 'Topic :: Internet', - 'Topic :: Software Development :: Libraries :: Python Modules', - ], - zip_safe=False, -) diff --git a/owl-bot-staging/v1/tests/__init__.py b/owl-bot-staging/v1/tests/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/tests/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# 
-# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/tests/unit/__init__.py b/owl-bot-staging/v1/tests/unit/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/tests/unit/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/tests/unit/gapic/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_dataset_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_dataset_service.py deleted file mode 100644 index 5523512daf..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_dataset_service.py +++ /dev/null @@ -1,4030 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1.services.dataset_service import DatasetServiceAsyncClient -from google.cloud.aiplatform_v1.services.dataset_service import DatasetServiceClient -from google.cloud.aiplatform_v1.services.dataset_service import pagers -from google.cloud.aiplatform_v1.services.dataset_service import transports -from google.cloud.aiplatform_v1.types import annotation -from google.cloud.aiplatform_v1.types import annotation_spec -from google.cloud.aiplatform_v1.types import data_item -from google.cloud.aiplatform_v1.types import dataset -from google.cloud.aiplatform_v1.types import dataset as gca_dataset -from google.cloud.aiplatform_v1.types import dataset_service -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import io -from google.cloud.aiplatform_v1.types import operation as gca_operation -from 
google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert DatasetServiceClient._get_default_mtls_endpoint(None) is None - assert DatasetServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert DatasetServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert DatasetServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert DatasetServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert DatasetServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - DatasetServiceClient, - DatasetServiceAsyncClient, -]) -def test_dataset_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - 
assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.DatasetServiceGrpcTransport, "grpc"), - (transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_dataset_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - DatasetServiceClient, - DatasetServiceAsyncClient, -]) -def test_dataset_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_dataset_service_client_get_transport_class(): - transport = DatasetServiceClient.get_transport_class() - available_transports = [ - transports.DatasetServiceGrpcTransport, - ] - assert transport in available_transports - - transport 
= DatasetServiceClient.get_transport_class("grpc") - assert transport == transports.DatasetServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc"), - (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(DatasetServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceClient)) -@mock.patch.object(DatasetServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceAsyncClient)) -def test_dataset_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(DatasetServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(DatasetServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc", "true"), - (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc", "false"), - (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(DatasetServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceClient)) -@mock.patch.object(DatasetServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_dataset_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc"), - (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_dataset_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc"), - (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_dataset_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_dataset_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1.services.dataset_service.transports.DatasetServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = DatasetServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_create_dataset(transport: str = 'grpc', request_type=dataset_service.CreateDatasetRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_dataset(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.CreateDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_dataset_from_dict(): - test_create_dataset(request_type=dict) - - -def test_create_dataset_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - client.create_dataset() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.CreateDatasetRequest() - - -@pytest.mark.asyncio -async def test_create_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.CreateDatasetRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.CreateDatasetRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_dataset_async_from_dict(): - await test_create_dataset_async(request_type=dict) - - -def test_create_dataset_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.CreateDatasetRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_dataset_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.CreateDatasetRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_dataset_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_dataset( - parent='parent_value', - dataset=gca_dataset.Dataset(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].dataset - mock_val = gca_dataset.Dataset(name='name_value') - assert arg == mock_val - - -def test_create_dataset_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_dataset( - dataset_service.CreateDatasetRequest(), - parent='parent_value', - dataset=gca_dataset.Dataset(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_dataset_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_dataset( - parent='parent_value', - dataset=gca_dataset.Dataset(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].dataset - mock_val = gca_dataset.Dataset(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_dataset_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_dataset( - dataset_service.CreateDatasetRequest(), - parent='parent_value', - dataset=gca_dataset.Dataset(name='name_value'), - ) - - -def test_get_dataset(transport: str = 'grpc', request_type=dataset_service.GetDatasetRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = dataset.Dataset( - name='name_value', - display_name='display_name_value', - description='description_value', - metadata_schema_uri='metadata_schema_uri_value', - etag='etag_value', - ) - response = client.get_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.GetDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, dataset.Dataset) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.etag == 'etag_value' - - -def test_get_dataset_from_dict(): - test_get_dataset(request_type=dict) - - -def test_get_dataset_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - client.get_dataset() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.GetDatasetRequest() - - -@pytest.mark.asyncio -async def test_get_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.GetDatasetRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset( - name='name_value', - display_name='display_name_value', - description='description_value', - metadata_schema_uri='metadata_schema_uri_value', - etag='etag_value', - )) - response = await client.get_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.GetDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, dataset.Dataset) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_dataset_async_from_dict(): - await test_get_dataset_async(request_type=dict) - - -def test_get_dataset_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.GetDatasetRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - call.return_value = dataset.Dataset() - client.get_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_dataset_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.GetDatasetRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset()) - await client.get_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_dataset_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset.Dataset() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_dataset( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_dataset_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_dataset( - dataset_service.GetDatasetRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_dataset_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset.Dataset() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_dataset( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_dataset_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_dataset( - dataset_service.GetDatasetRequest(), - name='name_value', - ) - - -def test_update_dataset(transport: str = 'grpc', request_type=dataset_service.UpdateDatasetRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_dataset.Dataset( - name='name_value', - display_name='display_name_value', - description='description_value', - metadata_schema_uri='metadata_schema_uri_value', - etag='etag_value', - ) - response = client.update_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.UpdateDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_dataset.Dataset) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.etag == 'etag_value' - - -def test_update_dataset_from_dict(): - test_update_dataset(request_type=dict) - - -def test_update_dataset_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - client.update_dataset() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.UpdateDatasetRequest() - - -@pytest.mark.asyncio -async def test_update_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.UpdateDatasetRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset( - name='name_value', - display_name='display_name_value', - description='description_value', - metadata_schema_uri='metadata_schema_uri_value', - etag='etag_value', - )) - response = await client.update_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.UpdateDatasetRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_dataset.Dataset) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_update_dataset_async_from_dict(): - await test_update_dataset_async(request_type=dict) - - -def test_update_dataset_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.UpdateDatasetRequest() - - request.dataset.name = 'dataset.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - call.return_value = gca_dataset.Dataset() - client.update_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'dataset.name=dataset.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_dataset_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.UpdateDatasetRequest() - - request.dataset.name = 'dataset.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) - await client.update_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'dataset.name=dataset.name/value', - ) in kw['metadata'] - - -def test_update_dataset_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_dataset.Dataset() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_dataset( - dataset=gca_dataset.Dataset(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].dataset - mock_val = gca_dataset.Dataset(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_dataset_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_dataset( - dataset_service.UpdateDatasetRequest(), - dataset=gca_dataset.Dataset(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_dataset_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_dataset.Dataset() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_dataset( - dataset=gca_dataset.Dataset(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].dataset - mock_val = gca_dataset.Dataset(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_dataset_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.update_dataset( - dataset_service.UpdateDatasetRequest(), - dataset=gca_dataset.Dataset(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_list_datasets(transport: str = 'grpc', request_type=dataset_service.ListDatasetsRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListDatasetsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_datasets(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListDatasetsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListDatasetsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_datasets_from_dict(): - test_list_datasets(request_type=dict) - - -def test_list_datasets_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - client.list_datasets() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListDatasetsRequest() - - -@pytest.mark.asyncio -async def test_list_datasets_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ListDatasetsRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDatasetsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_datasets(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListDatasetsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListDatasetsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_datasets_async_from_dict(): - await test_list_datasets_async(request_type=dict) - - -def test_list_datasets_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ListDatasetsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - call.return_value = dataset_service.ListDatasetsResponse() - client.list_datasets(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_datasets_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ListDatasetsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDatasetsResponse()) - await client.list_datasets(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_datasets_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = dataset_service.ListDatasetsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_datasets( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_datasets_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_datasets( - dataset_service.ListDatasetsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_datasets_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListDatasetsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDatasetsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_datasets( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_datasets_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_datasets( - dataset_service.ListDatasetsRequest(), - parent='parent_value', - ) - - -def test_list_datasets_pager(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - dataset.Dataset(), - ], - next_page_token='abc', - ), - dataset_service.ListDatasetsResponse( - datasets=[], - next_page_token='def', - ), - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - ], - next_page_token='ghi', - ), - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_datasets(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, dataset.Dataset) - for i in results) - -def test_list_datasets_pages(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - dataset.Dataset(), - ], - next_page_token='abc', - ), - dataset_service.ListDatasetsResponse( - datasets=[], - next_page_token='def', - ), - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - ], - next_page_token='ghi', - ), - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - ], - ), - RuntimeError, - ) - pages = list(client.list_datasets(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_datasets_async_pager(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - dataset.Dataset(), - ], - next_page_token='abc', - ), - dataset_service.ListDatasetsResponse( - datasets=[], - next_page_token='def', - ), - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - ], - next_page_token='ghi', - ), - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_datasets(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, dataset.Dataset) - for i in responses) - -@pytest.mark.asyncio -async def test_list_datasets_async_pages(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - dataset.Dataset(), - ], - next_page_token='abc', - ), - dataset_service.ListDatasetsResponse( - datasets=[], - next_page_token='def', - ), - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - ], - next_page_token='ghi', - ), - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_datasets(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_dataset(transport: str = 'grpc', request_type=dataset_service.DeleteDatasetRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.DeleteDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_dataset_from_dict(): - test_delete_dataset(request_type=dict) - - -def test_delete_dataset_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - client.delete_dataset() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.DeleteDatasetRequest() - - -@pytest.mark.asyncio -async def test_delete_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.DeleteDatasetRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.DeleteDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_dataset_async_from_dict(): - await test_delete_dataset_async(request_type=dict) - - -def test_delete_dataset_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = dataset_service.DeleteDatasetRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_dataset_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.DeleteDatasetRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_dataset_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_dataset( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_dataset_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_dataset( - dataset_service.DeleteDatasetRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_dataset_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_dataset( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_dataset_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_dataset( - dataset_service.DeleteDatasetRequest(), - name='name_value', - ) - - -def test_import_data(transport: str = 'grpc', request_type=dataset_service.ImportDataRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.import_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ImportDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_import_data_from_dict(): - test_import_data(request_type=dict) - - -def test_import_data_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - client.import_data() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ImportDataRequest() - - -@pytest.mark.asyncio -async def test_import_data_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ImportDataRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.import_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ImportDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_import_data_async_from_dict(): - await test_import_data_async(request_type=dict) - - -def test_import_data_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ImportDataRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.import_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_import_data_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ImportDataRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.import_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_import_data_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.import_data( - name='name_value', - import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].import_configs - mock_val = [dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))] - assert arg == mock_val - - -def test_import_data_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.import_data( - dataset_service.ImportDataRequest(), - name='name_value', - import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))], - ) - - -@pytest.mark.asyncio -async def test_import_data_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.import_data( - name='name_value', - import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].import_configs - mock_val = [dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))] - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_import_data_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.import_data( - dataset_service.ImportDataRequest(), - name='name_value', - import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))], - ) - - -def test_export_data(transport: str = 'grpc', request_type=dataset_service.ExportDataRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.export_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ExportDataRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_export_data_from_dict(): - test_export_data(request_type=dict) - - -def test_export_data_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - client.export_data() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ExportDataRequest() - - -@pytest.mark.asyncio -async def test_export_data_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ExportDataRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.export_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ExportDataRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_export_data_async_from_dict(): - await test_export_data_async(request_type=dict) - - -def test_export_data_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ExportDataRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.export_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_export_data_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ExportDataRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.export_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_export_data_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.export_data( - name='name_value', - export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].export_config - mock_val = dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) - assert arg == mock_val - - -def test_export_data_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.export_data( - dataset_service.ExportDataRequest(), - name='name_value', - export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - -@pytest.mark.asyncio -async def test_export_data_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.export_data( - name='name_value', - export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].export_config - mock_val = dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_export_data_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.export_data( - dataset_service.ExportDataRequest(), - name='name_value', - export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - -def test_list_data_items(transport: str = 'grpc', request_type=dataset_service.ListDataItemsRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListDataItemsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_data_items(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListDataItemsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListDataItemsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_data_items_from_dict(): - test_list_data_items(request_type=dict) - - -def test_list_data_items_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - client.list_data_items() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListDataItemsRequest() - - -@pytest.mark.asyncio -async def test_list_data_items_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ListDataItemsRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDataItemsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_data_items(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListDataItemsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListDataItemsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_data_items_async_from_dict(): - await test_list_data_items_async(request_type=dict) - - -def test_list_data_items_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ListDataItemsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - call.return_value = dataset_service.ListDataItemsResponse() - client.list_data_items(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_data_items_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ListDataItemsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDataItemsResponse()) - await client.list_data_items(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_data_items_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListDataItemsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_data_items( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_data_items_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_data_items( - dataset_service.ListDataItemsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_data_items_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListDataItemsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDataItemsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_data_items( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_data_items_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.list_data_items( - dataset_service.ListDataItemsRequest(), - parent='parent_value', - ) - - -def test_list_data_items_pager(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - data_item.DataItem(), - ], - next_page_token='abc', - ), - dataset_service.ListDataItemsResponse( - data_items=[], - next_page_token='def', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - ], - next_page_token='ghi', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_data_items(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, data_item.DataItem) - for i in results) - -def test_list_data_items_pages(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - data_item.DataItem(), - ], - next_page_token='abc', - ), - dataset_service.ListDataItemsResponse( - data_items=[], - next_page_token='def', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - ], - next_page_token='ghi', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - ], - ), - RuntimeError, - ) - pages = list(client.list_data_items(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_data_items_async_pager(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - data_item.DataItem(), - ], - next_page_token='abc', - ), - dataset_service.ListDataItemsResponse( - data_items=[], - next_page_token='def', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - ], - next_page_token='ghi', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_data_items(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, data_item.DataItem) - for i in responses) - -@pytest.mark.asyncio -async def test_list_data_items_async_pages(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - data_item.DataItem(), - ], - next_page_token='abc', - ), - dataset_service.ListDataItemsResponse( - data_items=[], - next_page_token='def', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - ], - next_page_token='ghi', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_data_items(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_get_annotation_spec(transport: str = 'grpc', request_type=dataset_service.GetAnnotationSpecRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = annotation_spec.AnnotationSpec( - name='name_value', - display_name='display_name_value', - etag='etag_value', - ) - response = client.get_annotation_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.GetAnnotationSpecRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, annotation_spec.AnnotationSpec) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.etag == 'etag_value' - - -def test_get_annotation_spec_from_dict(): - test_get_annotation_spec(request_type=dict) - - -def test_get_annotation_spec_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - client.get_annotation_spec() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.GetAnnotationSpecRequest() - - -@pytest.mark.asyncio -async def test_get_annotation_spec_async(transport: str = 'grpc_asyncio', request_type=dataset_service.GetAnnotationSpecRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec( - name='name_value', - display_name='display_name_value', - etag='etag_value', - )) - response = await client.get_annotation_spec(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.GetAnnotationSpecRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, annotation_spec.AnnotationSpec) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_annotation_spec_async_from_dict(): - await test_get_annotation_spec_async(request_type=dict) - - -def test_get_annotation_spec_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.GetAnnotationSpecRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - call.return_value = annotation_spec.AnnotationSpec() - client.get_annotation_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_annotation_spec_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.GetAnnotationSpecRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec()) - await client.get_annotation_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_annotation_spec_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = annotation_spec.AnnotationSpec() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_annotation_spec( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_annotation_spec_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_annotation_spec( - dataset_service.GetAnnotationSpecRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_annotation_spec_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = annotation_spec.AnnotationSpec() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_annotation_spec( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_annotation_spec_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_annotation_spec( - dataset_service.GetAnnotationSpecRequest(), - name='name_value', - ) - - -def test_list_annotations(transport: str = 'grpc', request_type=dataset_service.ListAnnotationsRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListAnnotationsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_annotations(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListAnnotationsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListAnnotationsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_annotations_from_dict(): - test_list_annotations(request_type=dict) - - -def test_list_annotations_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - client.list_annotations() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListAnnotationsRequest() - - -@pytest.mark.asyncio -async def test_list_annotations_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ListAnnotationsRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListAnnotationsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_annotations(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListAnnotationsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListAnnotationsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_annotations_async_from_dict(): - await test_list_annotations_async(request_type=dict) - - -def test_list_annotations_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ListAnnotationsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - call.return_value = dataset_service.ListAnnotationsResponse() - client.list_annotations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_annotations_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ListAnnotationsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListAnnotationsResponse()) - await client.list_annotations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_annotations_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListAnnotationsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_annotations( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_annotations_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_annotations( - dataset_service.ListAnnotationsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_annotations_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListAnnotationsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListAnnotationsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_annotations( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_annotations_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_annotations( - dataset_service.ListAnnotationsRequest(), - parent='parent_value', - ) - - -def test_list_annotations_pager(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - annotation.Annotation(), - ], - next_page_token='abc', - ), - dataset_service.ListAnnotationsResponse( - annotations=[], - next_page_token='def', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - ], - next_page_token='ghi', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_annotations(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, annotation.Annotation) - for i in results) - -def test_list_annotations_pages(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - annotation.Annotation(), - ], - next_page_token='abc', - ), - dataset_service.ListAnnotationsResponse( - annotations=[], - next_page_token='def', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - ], - next_page_token='ghi', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - ], - ), - RuntimeError, - ) - pages = list(client.list_annotations(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_annotations_async_pager(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - annotation.Annotation(), - ], - next_page_token='abc', - ), - dataset_service.ListAnnotationsResponse( - annotations=[], - next_page_token='def', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - ], - next_page_token='ghi', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_annotations(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, annotation.Annotation) - for i in responses) - -@pytest.mark.asyncio -async def test_list_annotations_async_pages(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - annotation.Annotation(), - ], - next_page_token='abc', - ), - dataset_service.ListAnnotationsResponse( - annotations=[], - next_page_token='def', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - ], - next_page_token='ghi', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_annotations(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.DatasetServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.DatasetServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = DatasetServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.DatasetServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = DatasetServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. 
- transport = transports.DatasetServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = DatasetServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.DatasetServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.DatasetServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.DatasetServiceGrpcTransport, - transports.DatasetServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.DatasetServiceGrpcTransport, - ) - -def test_dataset_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.DatasetServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_dataset_service_base_transport(): - # Instantiate the base transport. 
- with mock.patch('google.cloud.aiplatform_v1.services.dataset_service.transports.DatasetServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.DatasetServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'create_dataset', - 'get_dataset', - 'update_dataset', - 'list_datasets', - 'delete_dataset', - 'import_data', - 'export_data', - 'list_data_items', - 'get_annotation_spec', - 'list_annotations', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -def test_dataset_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.dataset_service.transports.DatasetServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.DatasetServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_dataset_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.dataset_service.transports.DatasetServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.DatasetServiceTransport() - adc.assert_called_once() - - -def test_dataset_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - DatasetServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.DatasetServiceGrpcTransport, - transports.DatasetServiceGrpcAsyncIOTransport, - ], -) -def test_dataset_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.DatasetServiceGrpcTransport, grpc_helpers), - (transports.DatasetServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_dataset_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.DatasetServiceGrpcTransport, transports.DatasetServiceGrpcAsyncIOTransport]) -def test_dataset_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. 
- with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_dataset_service_host_no_port(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_dataset_service_host_with_port(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_dataset_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.DatasetServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_dataset_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.DatasetServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.DatasetServiceGrpcTransport, transports.DatasetServiceGrpcAsyncIOTransport]) -def test_dataset_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from 
grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.DatasetServiceGrpcTransport, transports.DatasetServiceGrpcAsyncIOTransport]) -def test_dataset_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_dataset_service_grpc_lro_client(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_dataset_service_grpc_lro_async_client(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_annotation_path(): - project = "squid" - location = "clam" - dataset = "whelk" - data_item = "octopus" - annotation = "oyster" - expected = "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}/annotations/{annotation}".format(project=project, location=location, dataset=dataset, data_item=data_item, annotation=annotation, ) - actual = DatasetServiceClient.annotation_path(project, location, dataset, data_item, annotation) - assert expected == actual - - -def test_parse_annotation_path(): - expected = { - "project": "nudibranch", - "location": "cuttlefish", - "dataset": "mussel", - "data_item": "winkle", - "annotation": "nautilus", - } - path = DatasetServiceClient.annotation_path(**expected) - - # Check that the path construction is reversible. - actual = DatasetServiceClient.parse_annotation_path(path) - assert expected == actual - -def test_annotation_spec_path(): - project = "scallop" - location = "abalone" - dataset = "squid" - annotation_spec = "clam" - expected = "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(project=project, location=location, dataset=dataset, annotation_spec=annotation_spec, ) - actual = DatasetServiceClient.annotation_spec_path(project, location, dataset, annotation_spec) - assert expected == actual - - -def test_parse_annotation_spec_path(): - expected = { - "project": "whelk", - "location": "octopus", - "dataset": "oyster", - "annotation_spec": "nudibranch", - } - path = DatasetServiceClient.annotation_spec_path(**expected) - - # Check that the path construction is reversible. 
- actual = DatasetServiceClient.parse_annotation_spec_path(path) - assert expected == actual - -def test_data_item_path(): - project = "cuttlefish" - location = "mussel" - dataset = "winkle" - data_item = "nautilus" - expected = "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}".format(project=project, location=location, dataset=dataset, data_item=data_item, ) - actual = DatasetServiceClient.data_item_path(project, location, dataset, data_item) - assert expected == actual - - -def test_parse_data_item_path(): - expected = { - "project": "scallop", - "location": "abalone", - "dataset": "squid", - "data_item": "clam", - } - path = DatasetServiceClient.data_item_path(**expected) - - # Check that the path construction is reversible. - actual = DatasetServiceClient.parse_data_item_path(path) - assert expected == actual - -def test_dataset_path(): - project = "whelk" - location = "octopus" - dataset = "oyster" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - actual = DatasetServiceClient.dataset_path(project, location, dataset) - assert expected == actual - - -def test_parse_dataset_path(): - expected = { - "project": "nudibranch", - "location": "cuttlefish", - "dataset": "mussel", - } - path = DatasetServiceClient.dataset_path(**expected) - - # Check that the path construction is reversible. 
- actual = DatasetServiceClient.parse_dataset_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "winkle" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = DatasetServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "nautilus", - } - path = DatasetServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = DatasetServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "scallop" - expected = "folders/{folder}".format(folder=folder, ) - actual = DatasetServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "abalone", - } - path = DatasetServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = DatasetServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "squid" - expected = "organizations/{organization}".format(organization=organization, ) - actual = DatasetServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "clam", - } - path = DatasetServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. 
- actual = DatasetServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "whelk" - expected = "projects/{project}".format(project=project, ) - actual = DatasetServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "octopus", - } - path = DatasetServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = DatasetServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "oyster" - location = "nudibranch" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = DatasetServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "cuttlefish", - "location": "mussel", - } - path = DatasetServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = DatasetServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.DatasetServiceTransport, '_prep_wrapped_messages') as prep: - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.DatasetServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = DatasetServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py deleted file mode 100644 index 5d8d31e640..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py +++ /dev/null @@ -1,2977 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1.services.endpoint_service import EndpointServiceAsyncClient -from google.cloud.aiplatform_v1.services.endpoint_service import EndpointServiceClient -from google.cloud.aiplatform_v1.services.endpoint_service import pagers -from google.cloud.aiplatform_v1.services.endpoint_service import transports -from google.cloud.aiplatform_v1.types import accelerator_type -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import endpoint -from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint -from google.cloud.aiplatform_v1.types import endpoint_service -from google.cloud.aiplatform_v1.types import explanation -from google.cloud.aiplatform_v1.types import explanation_metadata -from google.cloud.aiplatform_v1.types import machine_resources -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is 
localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert EndpointServiceClient._get_default_mtls_endpoint(None) is None - assert EndpointServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert EndpointServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert EndpointServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert EndpointServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert EndpointServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - EndpointServiceClient, - EndpointServiceAsyncClient, -]) -def test_endpoint_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.EndpointServiceGrpcTransport, "grpc"), - (transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def 
test_endpoint_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - EndpointServiceClient, - EndpointServiceAsyncClient, -]) -def test_endpoint_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_endpoint_service_client_get_transport_class(): - transport = EndpointServiceClient.get_transport_class() - available_transports = [ - transports.EndpointServiceGrpcTransport, - ] - assert transport in available_transports - - transport = EndpointServiceClient.get_transport_class("grpc") - assert transport == transports.EndpointServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"), - (EndpointServiceAsyncClient, 
transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(EndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceClient)) -@mock.patch.object(EndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceAsyncClient)) -def test_endpoint_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(EndpointServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(EndpointServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc", "true"), - (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc", "false"), - (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(EndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceClient)) -@mock.patch.object(EndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_endpoint_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"), - (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_endpoint_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"), - (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_endpoint_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_endpoint_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = EndpointServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_create_endpoint(transport: str = 'grpc', request_type=endpoint_service.CreateEndpointRequest): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_endpoint(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.CreateEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_endpoint_from_dict(): - test_create_endpoint(request_type=dict) - - -def test_create_endpoint_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_endpoint), - '__call__') as call: - client.create_endpoint() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.CreateEndpointRequest() - - -@pytest.mark.asyncio -async def test_create_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.CreateEndpointRequest): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_endpoint(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.CreateEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_endpoint_async_from_dict(): - await test_create_endpoint_async(request_type=dict) - - -def test_create_endpoint_field_headers(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.CreateEndpointRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_endpoint), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_endpoint_field_headers_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.CreateEndpointRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_endpoint), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_endpoint_flattened(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_endpoint( - parent='parent_value', - endpoint=gca_endpoint.Endpoint(name='name_value'), - endpoint_id='endpoint_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].endpoint - mock_val = gca_endpoint.Endpoint(name='name_value') - assert arg == mock_val - arg = args[0].endpoint_id - mock_val = 'endpoint_id_value' - assert arg == mock_val - - -def test_create_endpoint_flattened_error(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_endpoint( - endpoint_service.CreateEndpointRequest(), - parent='parent_value', - endpoint=gca_endpoint.Endpoint(name='name_value'), - endpoint_id='endpoint_id_value', - ) - - -@pytest.mark.asyncio -async def test_create_endpoint_flattened_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_endpoint( - parent='parent_value', - endpoint=gca_endpoint.Endpoint(name='name_value'), - endpoint_id='endpoint_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].endpoint - mock_val = gca_endpoint.Endpoint(name='name_value') - assert arg == mock_val - arg = args[0].endpoint_id - mock_val = 'endpoint_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_endpoint_flattened_error_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_endpoint( - endpoint_service.CreateEndpointRequest(), - parent='parent_value', - endpoint=gca_endpoint.Endpoint(name='name_value'), - endpoint_id='endpoint_id_value', - ) - - -def test_get_endpoint(transport: str = 'grpc', request_type=endpoint_service.GetEndpointRequest): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = endpoint.Endpoint( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - network='network_value', - enable_private_service_connect=True, - model_deployment_monitoring_job='model_deployment_monitoring_job_value', - ) - response = client.get_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.GetEndpointRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, endpoint.Endpoint) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.network == 'network_value' - assert response.enable_private_service_connect is True - assert response.model_deployment_monitoring_job == 'model_deployment_monitoring_job_value' - - -def test_get_endpoint_from_dict(): - test_get_endpoint(request_type=dict) - - -def test_get_endpoint_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_endpoint), - '__call__') as call: - client.get_endpoint() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.GetEndpointRequest() - - -@pytest.mark.asyncio -async def test_get_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.GetEndpointRequest): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - network='network_value', - enable_private_service_connect=True, - model_deployment_monitoring_job='model_deployment_monitoring_job_value', - )) - response = await client.get_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.GetEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, endpoint.Endpoint) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.network == 'network_value' - assert response.enable_private_service_connect is True - assert response.model_deployment_monitoring_job == 'model_deployment_monitoring_job_value' - - -@pytest.mark.asyncio -async def test_get_endpoint_async_from_dict(): - await test_get_endpoint_async(request_type=dict) - - -def test_get_endpoint_field_headers(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.GetEndpointRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_endpoint), - '__call__') as call: - call.return_value = endpoint.Endpoint() - client.get_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_endpoint_field_headers_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.GetEndpointRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_endpoint), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint()) - await client.get_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_endpoint_flattened(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = endpoint.Endpoint() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_endpoint( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_endpoint_flattened_error(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_endpoint( - endpoint_service.GetEndpointRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_endpoint_flattened_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = endpoint.Endpoint() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_endpoint( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_endpoint_flattened_error_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_endpoint( - endpoint_service.GetEndpointRequest(), - name='name_value', - ) - - -def test_list_endpoints(transport: str = 'grpc', request_type=endpoint_service.ListEndpointsRequest): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = endpoint_service.ListEndpointsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_endpoints(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.ListEndpointsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListEndpointsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_endpoints_from_dict(): - test_list_endpoints(request_type=dict) - - -def test_list_endpoints_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - client.list_endpoints() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.ListEndpointsRequest() - - -@pytest.mark.asyncio -async def test_list_endpoints_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.ListEndpointsRequest): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(endpoint_service.ListEndpointsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_endpoints(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.ListEndpointsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListEndpointsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_endpoints_async_from_dict(): - await test_list_endpoints_async(request_type=dict) - - -def test_list_endpoints_field_headers(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = endpoint_service.ListEndpointsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - call.return_value = endpoint_service.ListEndpointsResponse() - client.list_endpoints(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_endpoints_field_headers_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.ListEndpointsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint_service.ListEndpointsResponse()) - await client.list_endpoints(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_endpoints_flattened(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = endpoint_service.ListEndpointsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_endpoints( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_endpoints_flattened_error(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_endpoints( - endpoint_service.ListEndpointsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_endpoints_flattened_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = endpoint_service.ListEndpointsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint_service.ListEndpointsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_endpoints( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_endpoints_flattened_error_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_endpoints( - endpoint_service.ListEndpointsRequest(), - parent='parent_value', - ) - - -def test_list_endpoints_pager(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - endpoint_service.ListEndpointsResponse( - endpoints=[ - endpoint.Endpoint(), - endpoint.Endpoint(), - endpoint.Endpoint(), - ], - next_page_token='abc', - ), - endpoint_service.ListEndpointsResponse( - endpoints=[], - next_page_token='def', - ), - endpoint_service.ListEndpointsResponse( - endpoints=[ - endpoint.Endpoint(), - ], - next_page_token='ghi', - ), - endpoint_service.ListEndpointsResponse( - endpoints=[ - endpoint.Endpoint(), - endpoint.Endpoint(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_endpoints(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, endpoint.Endpoint) - for i in results) - -def test_list_endpoints_pages(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - endpoint_service.ListEndpointsResponse( - endpoints=[ - endpoint.Endpoint(), - endpoint.Endpoint(), - endpoint.Endpoint(), - ], - next_page_token='abc', - ), - endpoint_service.ListEndpointsResponse( - endpoints=[], - next_page_token='def', - ), - endpoint_service.ListEndpointsResponse( - endpoints=[ - endpoint.Endpoint(), - ], - next_page_token='ghi', - ), - endpoint_service.ListEndpointsResponse( - endpoints=[ - endpoint.Endpoint(), - endpoint.Endpoint(), - ], - ), - RuntimeError, - ) - pages = list(client.list_endpoints(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_endpoints_async_pager(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_endpoints), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - endpoint_service.ListEndpointsResponse( - endpoints=[ - endpoint.Endpoint(), - endpoint.Endpoint(), - endpoint.Endpoint(), - ], - next_page_token='abc', - ), - endpoint_service.ListEndpointsResponse( - endpoints=[], - next_page_token='def', - ), - endpoint_service.ListEndpointsResponse( - endpoints=[ - endpoint.Endpoint(), - ], - next_page_token='ghi', - ), - endpoint_service.ListEndpointsResponse( - endpoints=[ - endpoint.Endpoint(), - endpoint.Endpoint(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_endpoints(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, endpoint.Endpoint) - for i in responses) - -@pytest.mark.asyncio -async def test_list_endpoints_async_pages(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_endpoints), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - endpoint_service.ListEndpointsResponse( - endpoints=[ - endpoint.Endpoint(), - endpoint.Endpoint(), - endpoint.Endpoint(), - ], - next_page_token='abc', - ), - endpoint_service.ListEndpointsResponse( - endpoints=[], - next_page_token='def', - ), - endpoint_service.ListEndpointsResponse( - endpoints=[ - endpoint.Endpoint(), - ], - next_page_token='ghi', - ), - endpoint_service.ListEndpointsResponse( - endpoints=[ - endpoint.Endpoint(), - endpoint.Endpoint(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_endpoints(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_endpoint(transport: str = 'grpc', request_type=endpoint_service.UpdateEndpointRequest): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_endpoint.Endpoint( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - network='network_value', - enable_private_service_connect=True, - model_deployment_monitoring_job='model_deployment_monitoring_job_value', - ) - response = client.update_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.UpdateEndpointRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_endpoint.Endpoint) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.network == 'network_value' - assert response.enable_private_service_connect is True - assert response.model_deployment_monitoring_job == 'model_deployment_monitoring_job_value' - - -def test_update_endpoint_from_dict(): - test_update_endpoint(request_type=dict) - - -def test_update_endpoint_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_endpoint), - '__call__') as call: - client.update_endpoint() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.UpdateEndpointRequest() - - -@pytest.mark.asyncio -async def test_update_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.UpdateEndpointRequest): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_endpoint.Endpoint( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - network='network_value', - enable_private_service_connect=True, - model_deployment_monitoring_job='model_deployment_monitoring_job_value', - )) - response = await client.update_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.UpdateEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_endpoint.Endpoint) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.network == 'network_value' - assert response.enable_private_service_connect is True - assert response.model_deployment_monitoring_job == 'model_deployment_monitoring_job_value' - - -@pytest.mark.asyncio -async def test_update_endpoint_async_from_dict(): - await test_update_endpoint_async(request_type=dict) - - -def test_update_endpoint_field_headers(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.UpdateEndpointRequest() - - request.endpoint.name = 'endpoint.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_endpoint), - '__call__') as call: - call.return_value = gca_endpoint.Endpoint() - client.update_endpoint(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint.name=endpoint.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_endpoint_field_headers_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.UpdateEndpointRequest() - - request.endpoint.name = 'endpoint.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_endpoint), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_endpoint.Endpoint()) - await client.update_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint.name=endpoint.name/value', - ) in kw['metadata'] - - -def test_update_endpoint_flattened(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_endpoint.Endpoint() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.update_endpoint( - endpoint=gca_endpoint.Endpoint(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].endpoint - mock_val = gca_endpoint.Endpoint(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_endpoint_flattened_error(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_endpoint( - endpoint_service.UpdateEndpointRequest(), - endpoint=gca_endpoint.Endpoint(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_endpoint_flattened_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_endpoint.Endpoint() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_endpoint.Endpoint()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_endpoint( - endpoint=gca_endpoint.Endpoint(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].endpoint - mock_val = gca_endpoint.Endpoint(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_endpoint_flattened_error_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_endpoint( - endpoint_service.UpdateEndpointRequest(), - endpoint=gca_endpoint.Endpoint(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_endpoint(transport: str = 'grpc', request_type=endpoint_service.DeleteEndpointRequest): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.DeleteEndpointRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_delete_endpoint_from_dict(): - test_delete_endpoint(request_type=dict) - - -def test_delete_endpoint_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_endpoint), - '__call__') as call: - client.delete_endpoint() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.DeleteEndpointRequest() - - -@pytest.mark.asyncio -async def test_delete_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.DeleteEndpointRequest): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.DeleteEndpointRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_endpoint_async_from_dict(): - await test_delete_endpoint_async(request_type=dict) - - -def test_delete_endpoint_field_headers(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.DeleteEndpointRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_endpoint), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_endpoint_field_headers_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.DeleteEndpointRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_endpoint), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_endpoint(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_endpoint_flattened(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_endpoint( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_endpoint_flattened_error(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_endpoint( - endpoint_service.DeleteEndpointRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_endpoint_flattened_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_endpoint( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_endpoint_flattened_error_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_endpoint( - endpoint_service.DeleteEndpointRequest(), - name='name_value', - ) - - -def test_deploy_model(transport: str = 'grpc', request_type=endpoint_service.DeployModelRequest): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.deploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.DeployModelRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_deploy_model_from_dict(): - test_deploy_model(request_type=dict) - - -def test_deploy_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - client.deploy_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.DeployModelRequest() - - -@pytest.mark.asyncio -async def test_deploy_model_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.DeployModelRequest): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.deploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.DeployModelRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_deploy_model_async_from_dict(): - await test_deploy_model_async(request_type=dict) - - -def test_deploy_model_field_headers(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.DeployModelRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.deploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_deploy_model_field_headers_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.DeployModelRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.deploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -def test_deploy_model_flattened(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.deploy_model( - endpoint='endpoint_value', - deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))), - traffic_split={'key_value': 541}, - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].endpoint - mock_val = 'endpoint_value' - assert arg == mock_val - arg = args[0].deployed_model - mock_val = gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))) - assert arg == mock_val - arg = args[0].traffic_split - mock_val = {'key_value': 541} - assert arg == mock_val - - -def test_deploy_model_flattened_error(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.deploy_model( - endpoint_service.DeployModelRequest(), - endpoint='endpoint_value', - deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))), - traffic_split={'key_value': 541}, - ) - - -@pytest.mark.asyncio -async def test_deploy_model_flattened_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.deploy_model( - endpoint='endpoint_value', - deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))), - traffic_split={'key_value': 541}, - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].endpoint - mock_val = 'endpoint_value' - assert arg == mock_val - arg = args[0].deployed_model - mock_val = gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))) - assert arg == mock_val - arg = args[0].traffic_split - mock_val = {'key_value': 541} - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_deploy_model_flattened_error_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.deploy_model( - endpoint_service.DeployModelRequest(), - endpoint='endpoint_value', - deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))), - traffic_split={'key_value': 541}, - ) - - -def test_undeploy_model(transport: str = 'grpc', request_type=endpoint_service.UndeployModelRequest): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.undeploy_model(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.UndeployModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_undeploy_model_from_dict(): - test_undeploy_model(request_type=dict) - - -def test_undeploy_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - client.undeploy_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.UndeployModelRequest() - - -@pytest.mark.asyncio -async def test_undeploy_model_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.UndeployModelRequest): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.undeploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.UndeployModelRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_undeploy_model_async_from_dict(): - await test_undeploy_model_async(request_type=dict) - - -def test_undeploy_model_field_headers(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.UndeployModelRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.undeploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_undeploy_model_field_headers_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.UndeployModelRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.undeploy_model(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -def test_undeploy_model_flattened(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.undeploy_model( - endpoint='endpoint_value', - deployed_model_id='deployed_model_id_value', - traffic_split={'key_value': 541}, - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].endpoint - mock_val = 'endpoint_value' - assert arg == mock_val - arg = args[0].deployed_model_id - mock_val = 'deployed_model_id_value' - assert arg == mock_val - arg = args[0].traffic_split - mock_val = {'key_value': 541} - assert arg == mock_val - - -def test_undeploy_model_flattened_error(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.undeploy_model( - endpoint_service.UndeployModelRequest(), - endpoint='endpoint_value', - deployed_model_id='deployed_model_id_value', - traffic_split={'key_value': 541}, - ) - - -@pytest.mark.asyncio -async def test_undeploy_model_flattened_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.undeploy_model( - endpoint='endpoint_value', - deployed_model_id='deployed_model_id_value', - traffic_split={'key_value': 541}, - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].endpoint - mock_val = 'endpoint_value' - assert arg == mock_val - arg = args[0].deployed_model_id - mock_val = 'deployed_model_id_value' - assert arg == mock_val - arg = args[0].traffic_split - mock_val = {'key_value': 541} - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_undeploy_model_flattened_error_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.undeploy_model( - endpoint_service.UndeployModelRequest(), - endpoint='endpoint_value', - deployed_model_id='deployed_model_id_value', - traffic_split={'key_value': 541}, - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.EndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.EndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = EndpointServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.EndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = EndpointServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.EndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = EndpointServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. 
- transport = transports.EndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.EndpointServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.EndpointServiceGrpcTransport, - transports.EndpointServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.EndpointServiceGrpcTransport, - ) - -def test_endpoint_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.EndpointServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_endpoint_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.EndpointServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
- methods = ( - 'create_endpoint', - 'get_endpoint', - 'list_endpoints', - 'update_endpoint', - 'delete_endpoint', - 'deploy_model', - 'undeploy_model', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -def test_endpoint_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.EndpointServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_endpoint_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.EndpointServiceTransport() - adc.assert_called_once() - - -def test_endpoint_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - EndpointServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.EndpointServiceGrpcTransport, - transports.EndpointServiceGrpcAsyncIOTransport, - ], -) -def test_endpoint_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.EndpointServiceGrpcTransport, grpc_helpers), - (transports.EndpointServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_endpoint_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport]) -def test_endpoint_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. 
- with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_endpoint_service_host_no_port(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_endpoint_service_host_with_port(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_endpoint_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.EndpointServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_endpoint_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.EndpointServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport]) -def test_endpoint_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from 
grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport]) -def test_endpoint_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_endpoint_service_grpc_lro_client(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_endpoint_service_grpc_lro_async_client(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_endpoint_path(): - project = "squid" - location = "clam" - endpoint = "whelk" - expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - actual = EndpointServiceClient.endpoint_path(project, location, endpoint) - assert expected == actual - - -def test_parse_endpoint_path(): - expected = { - "project": "octopus", - "location": "oyster", - "endpoint": "nudibranch", - } - path = EndpointServiceClient.endpoint_path(**expected) - - # Check that the path construction is reversible. - actual = EndpointServiceClient.parse_endpoint_path(path) - assert expected == actual - -def test_model_path(): - project = "cuttlefish" - location = "mussel" - model = "winkle" - expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - actual = EndpointServiceClient.model_path(project, location, model) - assert expected == actual - - -def test_parse_model_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "model": "abalone", - } - path = EndpointServiceClient.model_path(**expected) - - # Check that the path construction is reversible. 
- actual = EndpointServiceClient.parse_model_path(path) - assert expected == actual - -def test_model_deployment_monitoring_job_path(): - project = "squid" - location = "clam" - model_deployment_monitoring_job = "whelk" - expected = "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format(project=project, location=location, model_deployment_monitoring_job=model_deployment_monitoring_job, ) - actual = EndpointServiceClient.model_deployment_monitoring_job_path(project, location, model_deployment_monitoring_job) - assert expected == actual - - -def test_parse_model_deployment_monitoring_job_path(): - expected = { - "project": "octopus", - "location": "oyster", - "model_deployment_monitoring_job": "nudibranch", - } - path = EndpointServiceClient.model_deployment_monitoring_job_path(**expected) - - # Check that the path construction is reversible. - actual = EndpointServiceClient.parse_model_deployment_monitoring_job_path(path) - assert expected == actual - -def test_network_path(): - project = "cuttlefish" - network = "mussel" - expected = "projects/{project}/global/networks/{network}".format(project=project, network=network, ) - actual = EndpointServiceClient.network_path(project, network) - assert expected == actual - - -def test_parse_network_path(): - expected = { - "project": "winkle", - "network": "nautilus", - } - path = EndpointServiceClient.network_path(**expected) - - # Check that the path construction is reversible. 
- actual = EndpointServiceClient.parse_network_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "scallop" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = EndpointServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "abalone", - } - path = EndpointServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = EndpointServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "squid" - expected = "folders/{folder}".format(folder=folder, ) - actual = EndpointServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "clam", - } - path = EndpointServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = EndpointServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "whelk" - expected = "organizations/{organization}".format(organization=organization, ) - actual = EndpointServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "octopus", - } - path = EndpointServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. 
- actual = EndpointServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "oyster" - expected = "projects/{project}".format(project=project, ) - actual = EndpointServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "nudibranch", - } - path = EndpointServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = EndpointServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "cuttlefish" - location = "mussel" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = EndpointServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "winkle", - "location": "nautilus", - } - path = EndpointServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = EndpointServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.EndpointServiceTransport, '_prep_wrapped_messages') as prep: - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.EndpointServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = EndpointServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py deleted file mode 100644 index cdaf5df939..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_featurestore_online_serving_service.py +++ /dev/null @@ -1,1382 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1.services.featurestore_online_serving_service import FeaturestoreOnlineServingServiceAsyncClient -from google.cloud.aiplatform_v1.services.featurestore_online_serving_service import FeaturestoreOnlineServingServiceClient -from google.cloud.aiplatform_v1.services.featurestore_online_serving_service import transports -from google.cloud.aiplatform_v1.types import feature_selector -from google.cloud.aiplatform_v1.types import featurestore_online_service -from google.oauth2 import service_account -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(None) is None - assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - FeaturestoreOnlineServingServiceClient, - FeaturestoreOnlineServingServiceAsyncClient, -]) -def test_featurestore_online_serving_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"), - (transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def 
test_featurestore_online_serving_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - FeaturestoreOnlineServingServiceClient, - FeaturestoreOnlineServingServiceAsyncClient, -]) -def test_featurestore_online_serving_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_featurestore_online_serving_service_client_get_transport_class(): - transport = FeaturestoreOnlineServingServiceClient.get_transport_class() - available_transports = [ - transports.FeaturestoreOnlineServingServiceGrpcTransport, - ] - assert transport in available_transports - - transport = FeaturestoreOnlineServingServiceClient.get_transport_class("grpc") - assert transport == transports.FeaturestoreOnlineServingServiceGrpcTransport - - 
-@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"), - (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(FeaturestoreOnlineServingServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceClient)) -@mock.patch.object(FeaturestoreOnlineServingServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient)) -def test_featurestore_online_serving_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(FeaturestoreOnlineServingServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(FeaturestoreOnlineServingServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc", "true"), - (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc", "false"), - (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(FeaturestoreOnlineServingServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceClient)) -@mock.patch.object(FeaturestoreOnlineServingServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_featurestore_online_serving_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. 
Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"), - (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_featurestore_online_serving_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. 
- options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"), - (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_featurestore_online_serving_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_featurestore_online_serving_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = FeaturestoreOnlineServingServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_read_feature_values(transport: str = 'grpc', request_type=featurestore_online_service.ReadFeatureValuesRequest): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = featurestore_online_service.ReadFeatureValuesResponse( - ) - response = client.read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_online_service.ReadFeatureValuesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, featurestore_online_service.ReadFeatureValuesResponse) - - -def test_read_feature_values_from_dict(): - test_read_feature_values(request_type=dict) - - -def test_read_feature_values_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_feature_values), - '__call__') as call: - client.read_feature_values() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_online_service.ReadFeatureValuesRequest() - - -@pytest.mark.asyncio -async def test_read_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_online_service.ReadFeatureValuesRequest): - client = FeaturestoreOnlineServingServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(featurestore_online_service.ReadFeatureValuesResponse( - )) - response = await client.read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_online_service.ReadFeatureValuesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, featurestore_online_service.ReadFeatureValuesResponse) - - -@pytest.mark.asyncio -async def test_read_feature_values_async_from_dict(): - await test_read_feature_values_async(request_type=dict) - - -def test_read_feature_values_field_headers(): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_online_service.ReadFeatureValuesRequest() - - request.entity_type = 'entity_type/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_feature_values), - '__call__') as call: - call.return_value = featurestore_online_service.ReadFeatureValuesResponse() - client.read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'entity_type=entity_type/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_read_feature_values_field_headers_async(): - client = FeaturestoreOnlineServingServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = featurestore_online_service.ReadFeatureValuesRequest() - - request.entity_type = 'entity_type/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_feature_values), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_online_service.ReadFeatureValuesResponse()) - await client.read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'entity_type=entity_type/value', - ) in kw['metadata'] - - -def test_read_feature_values_flattened(): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_online_service.ReadFeatureValuesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.read_feature_values( - entity_type='entity_type_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].entity_type - mock_val = 'entity_type_value' - assert arg == mock_val - - -def test_read_feature_values_flattened_error(): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.read_feature_values( - featurestore_online_service.ReadFeatureValuesRequest(), - entity_type='entity_type_value', - ) - - -@pytest.mark.asyncio -async def test_read_feature_values_flattened_async(): - client = FeaturestoreOnlineServingServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_online_service.ReadFeatureValuesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_online_service.ReadFeatureValuesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.read_feature_values( - entity_type='entity_type_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].entity_type - mock_val = 'entity_type_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_read_feature_values_flattened_error_async(): - client = FeaturestoreOnlineServingServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.read_feature_values( - featurestore_online_service.ReadFeatureValuesRequest(), - entity_type='entity_type_value', - ) - - -def test_streaming_read_feature_values(transport: str = 'grpc', request_type=featurestore_online_service.StreamingReadFeatureValuesRequest): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.streaming_read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([featurestore_online_service.ReadFeatureValuesResponse()]) - response = client.streaming_read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest() - - # Establish that the response is the type that we expect. - for message in response: - assert isinstance(message, featurestore_online_service.ReadFeatureValuesResponse) - - -def test_streaming_read_feature_values_from_dict(): - test_streaming_read_feature_values(request_type=dict) - - -def test_streaming_read_feature_values_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.streaming_read_feature_values), - '__call__') as call: - client.streaming_read_feature_values() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest() - - -@pytest.mark.asyncio -async def test_streaming_read_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_online_service.StreamingReadFeatureValuesRequest): - client = FeaturestoreOnlineServingServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.streaming_read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock(side_effect=[featurestore_online_service.ReadFeatureValuesResponse()]) - response = await client.streaming_read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest() - - # Establish that the response is the type that we expect. 
- message = await response.read() - assert isinstance(message, featurestore_online_service.ReadFeatureValuesResponse) - - -@pytest.mark.asyncio -async def test_streaming_read_feature_values_async_from_dict(): - await test_streaming_read_feature_values_async(request_type=dict) - - -def test_streaming_read_feature_values_field_headers(): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_online_service.StreamingReadFeatureValuesRequest() - - request.entity_type = 'entity_type/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.streaming_read_feature_values), - '__call__') as call: - call.return_value = iter([featurestore_online_service.ReadFeatureValuesResponse()]) - client.streaming_read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'entity_type=entity_type/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_streaming_read_feature_values_field_headers_async(): - client = FeaturestoreOnlineServingServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_online_service.StreamingReadFeatureValuesRequest() - - request.entity_type = 'entity_type/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.streaming_read_feature_values), - '__call__') as call: - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock(side_effect=[featurestore_online_service.ReadFeatureValuesResponse()]) - await client.streaming_read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'entity_type=entity_type/value', - ) in kw['metadata'] - - -def test_streaming_read_feature_values_flattened(): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.streaming_read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([featurestore_online_service.ReadFeatureValuesResponse()]) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.streaming_read_feature_values( - entity_type='entity_type_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].entity_type - mock_val = 'entity_type_value' - assert arg == mock_val - - -def test_streaming_read_feature_values_flattened_error(): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.streaming_read_feature_values( - featurestore_online_service.StreamingReadFeatureValuesRequest(), - entity_type='entity_type_value', - ) - - -@pytest.mark.asyncio -async def test_streaming_read_feature_values_flattened_async(): - client = FeaturestoreOnlineServingServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.streaming_read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([featurestore_online_service.ReadFeatureValuesResponse()]) - - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.streaming_read_feature_values( - entity_type='entity_type_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].entity_type - mock_val = 'entity_type_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_streaming_read_feature_values_flattened_error_async(): - client = FeaturestoreOnlineServingServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.streaming_read_feature_values( - featurestore_online_service.StreamingReadFeatureValuesRequest(), - entity_type='entity_type_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. 
- transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = FeaturestoreOnlineServingServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = FeaturestoreOnlineServingServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = FeaturestoreOnlineServingServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. 
- transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.FeaturestoreOnlineServingServiceGrpcTransport, - transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.FeaturestoreOnlineServingServiceGrpcTransport, - ) - -def test_featurestore_online_serving_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.FeaturestoreOnlineServingServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_featurestore_online_serving_service_base_transport(): - # Instantiate the base transport. 
- with mock.patch('google.cloud.aiplatform_v1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.FeaturestoreOnlineServingServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'read_feature_values', - 'streaming_read_feature_values', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - -def test_featurestore_online_serving_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.FeaturestoreOnlineServingServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_featurestore_online_serving_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.FeaturestoreOnlineServingServiceTransport() - adc.assert_called_once() - - -def test_featurestore_online_serving_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - FeaturestoreOnlineServingServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.FeaturestoreOnlineServingServiceGrpcTransport, - transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, - ], -) -def test_featurestore_online_serving_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.FeaturestoreOnlineServingServiceGrpcTransport, grpc_helpers), - (transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_featurestore_online_serving_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.FeaturestoreOnlineServingServiceGrpcTransport, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport]) -def test_featurestore_online_serving_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
- with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_featurestore_online_serving_service_host_no_port(): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_featurestore_online_serving_service_host_with_port(): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_featurestore_online_serving_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_featurestore_online_serving_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.FeaturestoreOnlineServingServiceGrpcTransport, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport]) -def test_featurestore_online_serving_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", 
private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.FeaturestoreOnlineServingServiceGrpcTransport, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport]) -def test_featurestore_online_serving_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_entity_type_path(): - project = "squid" - location = "clam" - featurestore = "whelk" - entity_type = "octopus" - expected = 
"projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, ) - actual = FeaturestoreOnlineServingServiceClient.entity_type_path(project, location, featurestore, entity_type) - assert expected == actual - - -def test_parse_entity_type_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - "featurestore": "cuttlefish", - "entity_type": "mussel", - } - path = FeaturestoreOnlineServingServiceClient.entity_type_path(**expected) - - # Check that the path construction is reversible. - actual = FeaturestoreOnlineServingServiceClient.parse_entity_type_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "winkle" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = FeaturestoreOnlineServingServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "nautilus", - } - path = FeaturestoreOnlineServingServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = FeaturestoreOnlineServingServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "scallop" - expected = "folders/{folder}".format(folder=folder, ) - actual = FeaturestoreOnlineServingServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "abalone", - } - path = FeaturestoreOnlineServingServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. 
- actual = FeaturestoreOnlineServingServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "squid" - expected = "organizations/{organization}".format(organization=organization, ) - actual = FeaturestoreOnlineServingServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "clam", - } - path = FeaturestoreOnlineServingServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = FeaturestoreOnlineServingServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "whelk" - expected = "projects/{project}".format(project=project, ) - actual = FeaturestoreOnlineServingServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "octopus", - } - path = FeaturestoreOnlineServingServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = FeaturestoreOnlineServingServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "oyster" - location = "nudibranch" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = FeaturestoreOnlineServingServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "cuttlefish", - "location": "mussel", - } - path = FeaturestoreOnlineServingServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = FeaturestoreOnlineServingServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.FeaturestoreOnlineServingServiceTransport, '_prep_wrapped_messages') as prep: - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.FeaturestoreOnlineServingServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = FeaturestoreOnlineServingServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = FeaturestoreOnlineServingServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py deleted file mode 100644 index 39ee8ec52e..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py +++ /dev/null @@ -1,6638 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1.services.featurestore_service import FeaturestoreServiceAsyncClient -from google.cloud.aiplatform_v1.services.featurestore_service import FeaturestoreServiceClient -from google.cloud.aiplatform_v1.services.featurestore_service import pagers -from google.cloud.aiplatform_v1.services.featurestore_service import transports -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import entity_type -from google.cloud.aiplatform_v1.types import entity_type as gca_entity_type -from google.cloud.aiplatform_v1.types import feature -from google.cloud.aiplatform_v1.types import feature as gca_feature -from google.cloud.aiplatform_v1.types import feature_selector -from google.cloud.aiplatform_v1.types import featurestore -from google.cloud.aiplatform_v1.types import featurestore as gca_featurestore -from google.cloud.aiplatform_v1.types import featurestore_service -from google.cloud.aiplatform_v1.types import io -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - 
-def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert FeaturestoreServiceClient._get_default_mtls_endpoint(None) is None - assert FeaturestoreServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert FeaturestoreServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert FeaturestoreServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert FeaturestoreServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert FeaturestoreServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - FeaturestoreServiceClient, - FeaturestoreServiceAsyncClient, -]) -def test_featurestore_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - 
(transports.FeaturestoreServiceGrpcTransport, "grpc"), - (transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_featurestore_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - FeaturestoreServiceClient, - FeaturestoreServiceAsyncClient, -]) -def test_featurestore_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_featurestore_service_client_get_transport_class(): - transport = FeaturestoreServiceClient.get_transport_class() - available_transports = [ - transports.FeaturestoreServiceGrpcTransport, - ] - assert transport in available_transports - - transport = FeaturestoreServiceClient.get_transport_class("grpc") - assert transport == transports.FeaturestoreServiceGrpcTransport - - 
-@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc"), - (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(FeaturestoreServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceClient)) -@mock.patch.object(FeaturestoreServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceAsyncClient)) -def test_featurestore_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(FeaturestoreServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(FeaturestoreServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc", "true"), - (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc", "false"), - (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(FeaturestoreServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceClient)) -@mock.patch.object(FeaturestoreServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_featurestore_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc"), - (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_featurestore_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc"), - (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_featurestore_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_featurestore_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1.services.featurestore_service.transports.FeaturestoreServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = FeaturestoreServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_create_featurestore(transport: str = 'grpc', request_type=featurestore_service.CreateFeaturestoreRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_featurestore(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.CreateFeaturestoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_featurestore_from_dict(): - test_create_featurestore(request_type=dict) - - -def test_create_featurestore_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_featurestore), - '__call__') as call: - client.create_featurestore() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.CreateFeaturestoreRequest() - - -@pytest.mark.asyncio -async def test_create_featurestore_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.CreateFeaturestoreRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_featurestore(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.CreateFeaturestoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_featurestore_async_from_dict(): - await test_create_featurestore_async(request_type=dict) - - -def test_create_featurestore_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.CreateFeaturestoreRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_featurestore), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_featurestore_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.CreateFeaturestoreRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_featurestore), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_featurestore_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_featurestore( - parent='parent_value', - featurestore=gca_featurestore.Featurestore(name='name_value'), - featurestore_id='featurestore_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].featurestore - mock_val = gca_featurestore.Featurestore(name='name_value') - assert arg == mock_val - arg = args[0].featurestore_id - mock_val = 'featurestore_id_value' - assert arg == mock_val - - -def test_create_featurestore_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_featurestore( - featurestore_service.CreateFeaturestoreRequest(), - parent='parent_value', - featurestore=gca_featurestore.Featurestore(name='name_value'), - featurestore_id='featurestore_id_value', - ) - - -@pytest.mark.asyncio -async def test_create_featurestore_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_featurestore( - parent='parent_value', - featurestore=gca_featurestore.Featurestore(name='name_value'), - featurestore_id='featurestore_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].featurestore - mock_val = gca_featurestore.Featurestore(name='name_value') - assert arg == mock_val - arg = args[0].featurestore_id - mock_val = 'featurestore_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_featurestore_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_featurestore( - featurestore_service.CreateFeaturestoreRequest(), - parent='parent_value', - featurestore=gca_featurestore.Featurestore(name='name_value'), - featurestore_id='featurestore_id_value', - ) - - -def test_get_featurestore(transport: str = 'grpc', request_type=featurestore_service.GetFeaturestoreRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore.Featurestore( - name='name_value', - etag='etag_value', - state=featurestore.Featurestore.State.STABLE, - ) - response = client.get_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.GetFeaturestoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, featurestore.Featurestore) - assert response.name == 'name_value' - assert response.etag == 'etag_value' - assert response.state == featurestore.Featurestore.State.STABLE - - -def test_get_featurestore_from_dict(): - test_get_featurestore(request_type=dict) - - -def test_get_featurestore_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_featurestore), - '__call__') as call: - client.get_featurestore() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.GetFeaturestoreRequest() - - -@pytest.mark.asyncio -async def test_get_featurestore_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.GetFeaturestoreRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(featurestore.Featurestore( - name='name_value', - etag='etag_value', - state=featurestore.Featurestore.State.STABLE, - )) - response = await client.get_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.GetFeaturestoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, featurestore.Featurestore) - assert response.name == 'name_value' - assert response.etag == 'etag_value' - assert response.state == featurestore.Featurestore.State.STABLE - - -@pytest.mark.asyncio -async def test_get_featurestore_async_from_dict(): - await test_get_featurestore_async(request_type=dict) - - -def test_get_featurestore_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. 
Set these to a non-empty value. - request = featurestore_service.GetFeaturestoreRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_featurestore), - '__call__') as call: - call.return_value = featurestore.Featurestore() - client.get_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_featurestore_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.GetFeaturestoreRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_featurestore), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore.Featurestore()) - await client.get_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_featurestore_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore.Featurestore() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_featurestore( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_featurestore_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_featurestore( - featurestore_service.GetFeaturestoreRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_featurestore_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore.Featurestore() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore.Featurestore()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_featurestore( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_featurestore_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_featurestore( - featurestore_service.GetFeaturestoreRequest(), - name='name_value', - ) - - -def test_list_featurestores(transport: str = 'grpc', request_type=featurestore_service.ListFeaturestoresRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.ListFeaturestoresResponse( - next_page_token='next_page_token_value', - ) - response = client.list_featurestores(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ListFeaturestoresRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListFeaturestoresPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_featurestores_from_dict(): - test_list_featurestores(request_type=dict) - - -def test_list_featurestores_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__') as call: - client.list_featurestores() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ListFeaturestoresRequest() - - -@pytest.mark.asyncio -async def test_list_featurestores_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ListFeaturestoresRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturestoresResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_featurestores(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ListFeaturestoresRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListFeaturestoresAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_featurestores_async_from_dict(): - await test_list_featurestores_async(request_type=dict) - - -def test_list_featurestores_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.ListFeaturestoresRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__') as call: - call.return_value = featurestore_service.ListFeaturestoresResponse() - client.list_featurestores(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_featurestores_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.ListFeaturestoresRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_featurestores), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturestoresResponse()) - await client.list_featurestores(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_featurestores_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.ListFeaturestoresResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_featurestores( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_featurestores_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_featurestores( - featurestore_service.ListFeaturestoresRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_featurestores_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.ListFeaturestoresResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturestoresResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_featurestores( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_featurestores_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_featurestores( - featurestore_service.ListFeaturestoresRequest(), - parent='parent_value', - ) - - -def test_list_featurestores_pager(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - featurestore.Featurestore(), - featurestore.Featurestore(), - ], - next_page_token='abc', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[], - next_page_token='def', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - ], - next_page_token='ghi', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - featurestore.Featurestore(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_featurestores(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, featurestore.Featurestore) - for i in results) - -def test_list_featurestores_pages(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - featurestore.Featurestore(), - featurestore.Featurestore(), - ], - next_page_token='abc', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[], - next_page_token='def', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - ], - next_page_token='ghi', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - featurestore.Featurestore(), - ], - ), - RuntimeError, - ) - pages = list(client.list_featurestores(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_featurestores_async_pager(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - featurestore.Featurestore(), - featurestore.Featurestore(), - ], - next_page_token='abc', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[], - next_page_token='def', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - ], - next_page_token='ghi', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - featurestore.Featurestore(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_featurestores(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, featurestore.Featurestore) - for i in responses) - -@pytest.mark.asyncio -async def test_list_featurestores_async_pages(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - featurestore.Featurestore(), - featurestore.Featurestore(), - ], - next_page_token='abc', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[], - next_page_token='def', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - ], - next_page_token='ghi', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - featurestore.Featurestore(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_featurestores(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_featurestore(transport: str = 'grpc', request_type=featurestore_service.UpdateFeaturestoreRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.update_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.UpdateFeaturestoreRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_update_featurestore_from_dict(): - test_update_featurestore(request_type=dict) - - -def test_update_featurestore_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_featurestore), - '__call__') as call: - client.update_featurestore() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.UpdateFeaturestoreRequest() - - -@pytest.mark.asyncio -async def test_update_featurestore_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.UpdateFeaturestoreRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.update_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.UpdateFeaturestoreRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_update_featurestore_async_from_dict(): - await test_update_featurestore_async(request_type=dict) - - -def test_update_featurestore_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.UpdateFeaturestoreRequest() - - request.featurestore.name = 'featurestore.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_featurestore), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.update_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'featurestore.name=featurestore.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_featurestore_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.UpdateFeaturestoreRequest() - - request.featurestore.name = 'featurestore.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_featurestore), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.update_featurestore(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'featurestore.name=featurestore.name/value', - ) in kw['metadata'] - - -def test_update_featurestore_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_featurestore( - featurestore=gca_featurestore.Featurestore(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].featurestore - mock_val = gca_featurestore.Featurestore(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_featurestore_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_featurestore( - featurestore_service.UpdateFeaturestoreRequest(), - featurestore=gca_featurestore.Featurestore(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_featurestore_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_featurestore( - featurestore=gca_featurestore.Featurestore(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].featurestore - mock_val = gca_featurestore.Featurestore(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_featurestore_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.update_featurestore( - featurestore_service.UpdateFeaturestoreRequest(), - featurestore=gca_featurestore.Featurestore(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_featurestore(transport: str = 'grpc', request_type=featurestore_service.DeleteFeaturestoreRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.DeleteFeaturestoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_featurestore_from_dict(): - test_delete_featurestore(request_type=dict) - - -def test_delete_featurestore_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_featurestore), - '__call__') as call: - client.delete_featurestore() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.DeleteFeaturestoreRequest() - - -@pytest.mark.asyncio -async def test_delete_featurestore_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.DeleteFeaturestoreRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.DeleteFeaturestoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_featurestore_async_from_dict(): - await test_delete_featurestore_async(request_type=dict) - - -def test_delete_featurestore_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.DeleteFeaturestoreRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_featurestore), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_featurestore_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.DeleteFeaturestoreRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_featurestore), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_featurestore_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_featurestore( - name='name_value', - force=True, - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].force - mock_val = True - assert arg == mock_val - - -def test_delete_featurestore_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_featurestore( - featurestore_service.DeleteFeaturestoreRequest(), - name='name_value', - force=True, - ) - - -@pytest.mark.asyncio -async def test_delete_featurestore_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_featurestore( - name='name_value', - force=True, - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].force - mock_val = True - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_featurestore_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_featurestore( - featurestore_service.DeleteFeaturestoreRequest(), - name='name_value', - force=True, - ) - - -def test_create_entity_type(transport: str = 'grpc', request_type=featurestore_service.CreateEntityTypeRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.CreateEntityTypeRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_entity_type_from_dict(): - test_create_entity_type(request_type=dict) - - -def test_create_entity_type_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_entity_type), - '__call__') as call: - client.create_entity_type() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.CreateEntityTypeRequest() - - -@pytest.mark.asyncio -async def test_create_entity_type_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.CreateEntityTypeRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.CreateEntityTypeRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_entity_type_async_from_dict(): - await test_create_entity_type_async(request_type=dict) - - -def test_create_entity_type_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = featurestore_service.CreateEntityTypeRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_entity_type), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_entity_type_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.CreateEntityTypeRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_entity_type), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_entity_type_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_entity_type( - parent='parent_value', - entity_type=gca_entity_type.EntityType(name='name_value'), - entity_type_id='entity_type_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].entity_type - mock_val = gca_entity_type.EntityType(name='name_value') - assert arg == mock_val - arg = args[0].entity_type_id - mock_val = 'entity_type_id_value' - assert arg == mock_val - - -def test_create_entity_type_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_entity_type( - featurestore_service.CreateEntityTypeRequest(), - parent='parent_value', - entity_type=gca_entity_type.EntityType(name='name_value'), - entity_type_id='entity_type_id_value', - ) - - -@pytest.mark.asyncio -async def test_create_entity_type_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_entity_type( - parent='parent_value', - entity_type=gca_entity_type.EntityType(name='name_value'), - entity_type_id='entity_type_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].entity_type - mock_val = gca_entity_type.EntityType(name='name_value') - assert arg == mock_val - arg = args[0].entity_type_id - mock_val = 'entity_type_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_entity_type_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_entity_type( - featurestore_service.CreateEntityTypeRequest(), - parent='parent_value', - entity_type=gca_entity_type.EntityType(name='name_value'), - entity_type_id='entity_type_id_value', - ) - - -def test_get_entity_type(transport: str = 'grpc', request_type=featurestore_service.GetEntityTypeRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = entity_type.EntityType( - name='name_value', - description='description_value', - etag='etag_value', - ) - response = client.get_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.GetEntityTypeRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, entity_type.EntityType) - assert response.name == 'name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - - -def test_get_entity_type_from_dict(): - test_get_entity_type(request_type=dict) - - -def test_get_entity_type_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_entity_type), - '__call__') as call: - client.get_entity_type() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.GetEntityTypeRequest() - - -@pytest.mark.asyncio -async def test_get_entity_type_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.GetEntityTypeRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(entity_type.EntityType( - name='name_value', - description='description_value', - etag='etag_value', - )) - response = await client.get_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.GetEntityTypeRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, entity_type.EntityType) - assert response.name == 'name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_entity_type_async_from_dict(): - await test_get_entity_type_async(request_type=dict) - - -def test_get_entity_type_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.GetEntityTypeRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_entity_type), - '__call__') as call: - call.return_value = entity_type.EntityType() - client.get_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_entity_type_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.GetEntityTypeRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_entity_type), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(entity_type.EntityType()) - await client.get_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_entity_type_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = entity_type.EntityType() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_entity_type( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_entity_type_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_entity_type( - featurestore_service.GetEntityTypeRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_entity_type_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = entity_type.EntityType() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(entity_type.EntityType()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_entity_type( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_entity_type_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_entity_type( - featurestore_service.GetEntityTypeRequest(), - name='name_value', - ) - - -def test_list_entity_types(transport: str = 'grpc', request_type=featurestore_service.ListEntityTypesRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_entity_types), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.ListEntityTypesResponse( - next_page_token='next_page_token_value', - ) - response = client.list_entity_types(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ListEntityTypesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListEntityTypesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_entity_types_from_dict(): - test_list_entity_types(request_type=dict) - - -def test_list_entity_types_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_entity_types), - '__call__') as call: - client.list_entity_types() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ListEntityTypesRequest() - - -@pytest.mark.asyncio -async def test_list_entity_types_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ListEntityTypesRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_entity_types), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListEntityTypesResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_entity_types(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ListEntityTypesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListEntityTypesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_entity_types_async_from_dict(): - await test_list_entity_types_async(request_type=dict) - - -def test_list_entity_types_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = featurestore_service.ListEntityTypesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_entity_types), - '__call__') as call: - call.return_value = featurestore_service.ListEntityTypesResponse() - client.list_entity_types(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_entity_types_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.ListEntityTypesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_entity_types), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListEntityTypesResponse()) - await client.list_entity_types(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_entity_types_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_entity_types), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.ListEntityTypesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_entity_types( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_entity_types_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_entity_types( - featurestore_service.ListEntityTypesRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_entity_types_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_entity_types), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.ListEntityTypesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListEntityTypesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_entity_types( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_entity_types_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_entity_types( - featurestore_service.ListEntityTypesRequest(), - parent='parent_value', - ) - - -def test_list_entity_types_pager(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_entity_types), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - featurestore_service.ListEntityTypesResponse( - entity_types=[ - entity_type.EntityType(), - entity_type.EntityType(), - entity_type.EntityType(), - ], - next_page_token='abc', - ), - featurestore_service.ListEntityTypesResponse( - entity_types=[], - next_page_token='def', - ), - featurestore_service.ListEntityTypesResponse( - entity_types=[ - entity_type.EntityType(), - ], - next_page_token='ghi', - ), - featurestore_service.ListEntityTypesResponse( - entity_types=[ - entity_type.EntityType(), - entity_type.EntityType(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_entity_types(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, entity_type.EntityType) - for i in results) - -def test_list_entity_types_pages(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call 
within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_entity_types), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - featurestore_service.ListEntityTypesResponse( - entity_types=[ - entity_type.EntityType(), - entity_type.EntityType(), - entity_type.EntityType(), - ], - next_page_token='abc', - ), - featurestore_service.ListEntityTypesResponse( - entity_types=[], - next_page_token='def', - ), - featurestore_service.ListEntityTypesResponse( - entity_types=[ - entity_type.EntityType(), - ], - next_page_token='ghi', - ), - featurestore_service.ListEntityTypesResponse( - entity_types=[ - entity_type.EntityType(), - entity_type.EntityType(), - ], - ), - RuntimeError, - ) - pages = list(client.list_entity_types(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_entity_types_async_pager(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_entity_types), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.ListEntityTypesResponse( - entity_types=[ - entity_type.EntityType(), - entity_type.EntityType(), - entity_type.EntityType(), - ], - next_page_token='abc', - ), - featurestore_service.ListEntityTypesResponse( - entity_types=[], - next_page_token='def', - ), - featurestore_service.ListEntityTypesResponse( - entity_types=[ - entity_type.EntityType(), - ], - next_page_token='ghi', - ), - featurestore_service.ListEntityTypesResponse( - entity_types=[ - entity_type.EntityType(), - entity_type.EntityType(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_entity_types(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, entity_type.EntityType) - for i in responses) - -@pytest.mark.asyncio -async def test_list_entity_types_async_pages(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_entity_types), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.ListEntityTypesResponse( - entity_types=[ - entity_type.EntityType(), - entity_type.EntityType(), - entity_type.EntityType(), - ], - next_page_token='abc', - ), - featurestore_service.ListEntityTypesResponse( - entity_types=[], - next_page_token='def', - ), - featurestore_service.ListEntityTypesResponse( - entity_types=[ - entity_type.EntityType(), - ], - next_page_token='ghi', - ), - featurestore_service.ListEntityTypesResponse( - entity_types=[ - entity_type.EntityType(), - entity_type.EntityType(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_entity_types(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_entity_type(transport: str = 'grpc', request_type=featurestore_service.UpdateEntityTypeRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_entity_type.EntityType( - name='name_value', - description='description_value', - etag='etag_value', - ) - response = client.update_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.UpdateEntityTypeRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_entity_type.EntityType) - assert response.name == 'name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - - -def test_update_entity_type_from_dict(): - test_update_entity_type(request_type=dict) - - -def test_update_entity_type_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_entity_type), - '__call__') as call: - client.update_entity_type() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.UpdateEntityTypeRequest() - - -@pytest.mark.asyncio -async def test_update_entity_type_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.UpdateEntityTypeRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_entity_type.EntityType( - name='name_value', - description='description_value', - etag='etag_value', - )) - response = await client.update_entity_type(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.UpdateEntityTypeRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_entity_type.EntityType) - assert response.name == 'name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_update_entity_type_async_from_dict(): - await test_update_entity_type_async(request_type=dict) - - -def test_update_entity_type_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.UpdateEntityTypeRequest() - - request.entity_type.name = 'entity_type.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_entity_type), - '__call__') as call: - call.return_value = gca_entity_type.EntityType() - client.update_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'entity_type.name=entity_type.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_entity_type_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.UpdateEntityTypeRequest() - - request.entity_type.name = 'entity_type.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_entity_type), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_entity_type.EntityType()) - await client.update_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'entity_type.name=entity_type.name/value', - ) in kw['metadata'] - - -def test_update_entity_type_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_entity_type.EntityType() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_entity_type( - entity_type=gca_entity_type.EntityType(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].entity_type - mock_val = gca_entity_type.EntityType(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_entity_type_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_entity_type( - featurestore_service.UpdateEntityTypeRequest(), - entity_type=gca_entity_type.EntityType(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_entity_type_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_entity_type.EntityType() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_entity_type.EntityType()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_entity_type( - entity_type=gca_entity_type.EntityType(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].entity_type - mock_val = gca_entity_type.EntityType(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_entity_type_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.update_entity_type( - featurestore_service.UpdateEntityTypeRequest(), - entity_type=gca_entity_type.EntityType(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_entity_type(transport: str = 'grpc', request_type=featurestore_service.DeleteEntityTypeRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.DeleteEntityTypeRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_entity_type_from_dict(): - test_delete_entity_type(request_type=dict) - - -def test_delete_entity_type_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_entity_type), - '__call__') as call: - client.delete_entity_type() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.DeleteEntityTypeRequest() - - -@pytest.mark.asyncio -async def test_delete_entity_type_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.DeleteEntityTypeRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.DeleteEntityTypeRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_entity_type_async_from_dict(): - await test_delete_entity_type_async(request_type=dict) - - -def test_delete_entity_type_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.DeleteEntityTypeRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_entity_type), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_entity_type_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.DeleteEntityTypeRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_entity_type), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_entity_type_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_entity_type( - name='name_value', - force=True, - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].force - mock_val = True - assert arg == mock_val - - -def test_delete_entity_type_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_entity_type( - featurestore_service.DeleteEntityTypeRequest(), - name='name_value', - force=True, - ) - - -@pytest.mark.asyncio -async def test_delete_entity_type_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_entity_type( - name='name_value', - force=True, - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].force - mock_val = True - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_entity_type_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_entity_type( - featurestore_service.DeleteEntityTypeRequest(), - name='name_value', - force=True, - ) - - -def test_create_feature(transport: str = 'grpc', request_type=featurestore_service.CreateFeatureRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.CreateFeatureRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_feature_from_dict(): - test_create_feature(request_type=dict) - - -def test_create_feature_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_feature), - '__call__') as call: - client.create_feature() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.CreateFeatureRequest() - - -@pytest.mark.asyncio -async def test_create_feature_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.CreateFeatureRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.CreateFeatureRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_feature_async_from_dict(): - await test_create_feature_async(request_type=dict) - - -def test_create_feature_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = featurestore_service.CreateFeatureRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_feature), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_feature_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.CreateFeatureRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_feature), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_feature_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_feature( - parent='parent_value', - feature=gca_feature.Feature(name='name_value'), - feature_id='feature_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].feature - mock_val = gca_feature.Feature(name='name_value') - assert arg == mock_val - arg = args[0].feature_id - mock_val = 'feature_id_value' - assert arg == mock_val - - -def test_create_feature_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_feature( - featurestore_service.CreateFeatureRequest(), - parent='parent_value', - feature=gca_feature.Feature(name='name_value'), - feature_id='feature_id_value', - ) - - -@pytest.mark.asyncio -async def test_create_feature_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_feature), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_feature( - parent='parent_value', - feature=gca_feature.Feature(name='name_value'), - feature_id='feature_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].feature - mock_val = gca_feature.Feature(name='name_value') - assert arg == mock_val - arg = args[0].feature_id - mock_val = 'feature_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_feature_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_feature( - featurestore_service.CreateFeatureRequest(), - parent='parent_value', - feature=gca_feature.Feature(name='name_value'), - feature_id='feature_id_value', - ) - - -def test_batch_create_features(transport: str = 'grpc', request_type=featurestore_service.BatchCreateFeaturesRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.batch_create_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.batch_create_features(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.BatchCreateFeaturesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_batch_create_features_from_dict(): - test_batch_create_features(request_type=dict) - - -def test_batch_create_features_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_features), - '__call__') as call: - client.batch_create_features() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.BatchCreateFeaturesRequest() - - -@pytest.mark.asyncio -async def test_batch_create_features_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.BatchCreateFeaturesRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.batch_create_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.batch_create_features(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.BatchCreateFeaturesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_batch_create_features_async_from_dict(): - await test_batch_create_features_async(request_type=dict) - - -def test_batch_create_features_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.BatchCreateFeaturesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_features), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.batch_create_features(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_batch_create_features_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.BatchCreateFeaturesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_features), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.batch_create_features(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_batch_create_features_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.batch_create_features( - parent='parent_value', - requests=[featurestore_service.CreateFeatureRequest(parent='parent_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].requests - mock_val = [featurestore_service.CreateFeatureRequest(parent='parent_value')] - assert arg == mock_val - - -def test_batch_create_features_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.batch_create_features( - featurestore_service.BatchCreateFeaturesRequest(), - parent='parent_value', - requests=[featurestore_service.CreateFeatureRequest(parent='parent_value')], - ) - - -@pytest.mark.asyncio -async def test_batch_create_features_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.batch_create_features( - parent='parent_value', - requests=[featurestore_service.CreateFeatureRequest(parent='parent_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].requests - mock_val = [featurestore_service.CreateFeatureRequest(parent='parent_value')] - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_batch_create_features_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.batch_create_features( - featurestore_service.BatchCreateFeaturesRequest(), - parent='parent_value', - requests=[featurestore_service.CreateFeatureRequest(parent='parent_value')], - ) - - -def test_get_feature(transport: str = 'grpc', request_type=featurestore_service.GetFeatureRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = feature.Feature( - name='name_value', - description='description_value', - value_type=feature.Feature.ValueType.BOOL, - etag='etag_value', - ) - response = client.get_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.GetFeatureRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, feature.Feature) - assert response.name == 'name_value' - assert response.description == 'description_value' - assert response.value_type == feature.Feature.ValueType.BOOL - assert response.etag == 'etag_value' - - -def test_get_feature_from_dict(): - test_get_feature(request_type=dict) - - -def test_get_feature_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_feature), - '__call__') as call: - client.get_feature() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.GetFeatureRequest() - - -@pytest.mark.asyncio -async def test_get_feature_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.GetFeatureRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(feature.Feature( - name='name_value', - description='description_value', - value_type=feature.Feature.ValueType.BOOL, - etag='etag_value', - )) - response = await client.get_feature(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.GetFeatureRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, feature.Feature) - assert response.name == 'name_value' - assert response.description == 'description_value' - assert response.value_type == feature.Feature.ValueType.BOOL - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_feature_async_from_dict(): - await test_get_feature_async(request_type=dict) - - -def test_get_feature_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.GetFeatureRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_feature), - '__call__') as call: - call.return_value = feature.Feature() - client.get_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_feature_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.GetFeatureRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_feature), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(feature.Feature()) - await client.get_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_feature_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = feature.Feature() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_feature( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_feature_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_feature( - featurestore_service.GetFeatureRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_feature_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = feature.Feature() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(feature.Feature()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_feature( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_feature_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_feature( - featurestore_service.GetFeatureRequest(), - name='name_value', - ) - - -def test_list_features(transport: str = 'grpc', request_type=featurestore_service.ListFeaturesRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.ListFeaturesResponse( - next_page_token='next_page_token_value', - ) - response = client.list_features(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ListFeaturesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListFeaturesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_features_from_dict(): - test_list_features(request_type=dict) - - -def test_list_features_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_features), - '__call__') as call: - client.list_features() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ListFeaturesRequest() - - -@pytest.mark.asyncio -async def test_list_features_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ListFeaturesRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturesResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_features(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ListFeaturesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListFeaturesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_features_async_from_dict(): - await test_list_features_async(request_type=dict) - - -def test_list_features_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.ListFeaturesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_features), - '__call__') as call: - call.return_value = featurestore_service.ListFeaturesResponse() - client.list_features(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_features_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.ListFeaturesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_features), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturesResponse()) - await client.list_features(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_features_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.ListFeaturesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_features( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_features_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_features( - featurestore_service.ListFeaturesRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_features_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.ListFeaturesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_features( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_features_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_features( - featurestore_service.ListFeaturesRequest(), - parent='parent_value', - ) - - -def test_list_features_pager(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_features), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.ListFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - feature.Feature(), - ], - next_page_token='abc', - ), - featurestore_service.ListFeaturesResponse( - features=[], - next_page_token='def', - ), - featurestore_service.ListFeaturesResponse( - features=[ - feature.Feature(), - ], - next_page_token='ghi', - ), - featurestore_service.ListFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_features(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, feature.Feature) - for i in results) - -def test_list_features_pages(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_features), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.ListFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - feature.Feature(), - ], - next_page_token='abc', - ), - featurestore_service.ListFeaturesResponse( - features=[], - next_page_token='def', - ), - featurestore_service.ListFeaturesResponse( - features=[ - feature.Feature(), - ], - next_page_token='ghi', - ), - featurestore_service.ListFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - ], - ), - RuntimeError, - ) - pages = list(client.list_features(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_features_async_pager(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_features), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.ListFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - feature.Feature(), - ], - next_page_token='abc', - ), - featurestore_service.ListFeaturesResponse( - features=[], - next_page_token='def', - ), - featurestore_service.ListFeaturesResponse( - features=[ - feature.Feature(), - ], - next_page_token='ghi', - ), - featurestore_service.ListFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_features(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, feature.Feature) - for i in responses) - -@pytest.mark.asyncio -async def test_list_features_async_pages(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_features), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.ListFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - feature.Feature(), - ], - next_page_token='abc', - ), - featurestore_service.ListFeaturesResponse( - features=[], - next_page_token='def', - ), - featurestore_service.ListFeaturesResponse( - features=[ - feature.Feature(), - ], - next_page_token='ghi', - ), - featurestore_service.ListFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_features(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_feature(transport: str = 'grpc', request_type=featurestore_service.UpdateFeatureRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_feature.Feature( - name='name_value', - description='description_value', - value_type=gca_feature.Feature.ValueType.BOOL, - etag='etag_value', - ) - response = client.update_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.UpdateFeatureRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_feature.Feature) - assert response.name == 'name_value' - assert response.description == 'description_value' - assert response.value_type == gca_feature.Feature.ValueType.BOOL - assert response.etag == 'etag_value' - - -def test_update_feature_from_dict(): - test_update_feature(request_type=dict) - - -def test_update_feature_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_feature), - '__call__') as call: - client.update_feature() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.UpdateFeatureRequest() - - -@pytest.mark.asyncio -async def test_update_feature_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.UpdateFeatureRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_feature.Feature( - name='name_value', - description='description_value', - value_type=gca_feature.Feature.ValueType.BOOL, - etag='etag_value', - )) - response = await client.update_feature(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.UpdateFeatureRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_feature.Feature) - assert response.name == 'name_value' - assert response.description == 'description_value' - assert response.value_type == gca_feature.Feature.ValueType.BOOL - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_update_feature_async_from_dict(): - await test_update_feature_async(request_type=dict) - - -def test_update_feature_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.UpdateFeatureRequest() - - request.feature.name = 'feature.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_feature), - '__call__') as call: - call.return_value = gca_feature.Feature() - client.update_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'feature.name=feature.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_feature_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.UpdateFeatureRequest() - - request.feature.name = 'feature.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_feature), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_feature.Feature()) - await client.update_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'feature.name=feature.name/value', - ) in kw['metadata'] - - -def test_update_feature_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_feature.Feature() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_feature( - feature=gca_feature.Feature(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].feature - mock_val = gca_feature.Feature(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_feature_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_feature( - featurestore_service.UpdateFeatureRequest(), - feature=gca_feature.Feature(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_feature_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_feature.Feature() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_feature.Feature()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_feature( - feature=gca_feature.Feature(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].feature - mock_val = gca_feature.Feature(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_feature_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.update_feature( - featurestore_service.UpdateFeatureRequest(), - feature=gca_feature.Feature(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_feature(transport: str = 'grpc', request_type=featurestore_service.DeleteFeatureRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.DeleteFeatureRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_feature_from_dict(): - test_delete_feature(request_type=dict) - - -def test_delete_feature_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_feature), - '__call__') as call: - client.delete_feature() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.DeleteFeatureRequest() - - -@pytest.mark.asyncio -async def test_delete_feature_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.DeleteFeatureRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.DeleteFeatureRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_feature_async_from_dict(): - await test_delete_feature_async(request_type=dict) - - -def test_delete_feature_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.DeleteFeatureRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_feature), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_feature_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.DeleteFeatureRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_feature), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_feature_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_feature), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_feature( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_feature_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_feature( - featurestore_service.DeleteFeatureRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_feature_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_feature( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_feature_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_feature( - featurestore_service.DeleteFeatureRequest(), - name='name_value', - ) - - -def test_import_feature_values(transport: str = 'grpc', request_type=featurestore_service.ImportFeatureValuesRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.import_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ImportFeatureValuesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_import_feature_values_from_dict(): - test_import_feature_values(request_type=dict) - - -def test_import_feature_values_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_feature_values), - '__call__') as call: - client.import_feature_values() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ImportFeatureValuesRequest() - - -@pytest.mark.asyncio -async def test_import_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ImportFeatureValuesRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.import_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ImportFeatureValuesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_import_feature_values_async_from_dict(): - await test_import_feature_values_async(request_type=dict) - - -def test_import_feature_values_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. 
Set these to a non-empty value. - request = featurestore_service.ImportFeatureValuesRequest() - - request.entity_type = 'entity_type/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_feature_values), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.import_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'entity_type=entity_type/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_import_feature_values_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.ImportFeatureValuesRequest() - - request.entity_type = 'entity_type/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_feature_values), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.import_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'entity_type=entity_type/value', - ) in kw['metadata'] - - -def test_import_feature_values_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.import_feature_values( - entity_type='entity_type_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].entity_type - mock_val = 'entity_type_value' - assert arg == mock_val - - -def test_import_feature_values_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.import_feature_values( - featurestore_service.ImportFeatureValuesRequest(), - entity_type='entity_type_value', - ) - - -@pytest.mark.asyncio -async def test_import_feature_values_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.import_feature_values( - entity_type='entity_type_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].entity_type - mock_val = 'entity_type_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_import_feature_values_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.import_feature_values( - featurestore_service.ImportFeatureValuesRequest(), - entity_type='entity_type_value', - ) - - -def test_batch_read_feature_values(transport: str = 'grpc', request_type=featurestore_service.BatchReadFeatureValuesRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.batch_read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.BatchReadFeatureValuesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_batch_read_feature_values_from_dict(): - test_batch_read_feature_values(request_type=dict) - - -def test_batch_read_feature_values_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_feature_values), - '__call__') as call: - client.batch_read_feature_values() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.BatchReadFeatureValuesRequest() - - -@pytest.mark.asyncio -async def test_batch_read_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.BatchReadFeatureValuesRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.batch_read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.BatchReadFeatureValuesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_batch_read_feature_values_async_from_dict(): - await test_batch_read_feature_values_async(request_type=dict) - - -def test_batch_read_feature_values_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.BatchReadFeatureValuesRequest() - - request.featurestore = 'featurestore/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_feature_values), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.batch_read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'featurestore=featurestore/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_batch_read_feature_values_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.BatchReadFeatureValuesRequest() - - request.featurestore = 'featurestore/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.batch_read_feature_values), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.batch_read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'featurestore=featurestore/value', - ) in kw['metadata'] - - -def test_batch_read_feature_values_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.batch_read_feature_values( - featurestore='featurestore_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].featurestore - mock_val = 'featurestore_value' - assert arg == mock_val - - -def test_batch_read_feature_values_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.batch_read_feature_values( - featurestore_service.BatchReadFeatureValuesRequest(), - featurestore='featurestore_value', - ) - - -@pytest.mark.asyncio -async def test_batch_read_feature_values_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.batch_read_feature_values( - featurestore='featurestore_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].featurestore - mock_val = 'featurestore_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_batch_read_feature_values_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.batch_read_feature_values( - featurestore_service.BatchReadFeatureValuesRequest(), - featurestore='featurestore_value', - ) - - -def test_export_feature_values(transport: str = 'grpc', request_type=featurestore_service.ExportFeatureValuesRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.export_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ExportFeatureValuesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_export_feature_values_from_dict(): - test_export_feature_values(request_type=dict) - - -def test_export_feature_values_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.export_feature_values), - '__call__') as call: - client.export_feature_values() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ExportFeatureValuesRequest() - - -@pytest.mark.asyncio -async def test_export_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ExportFeatureValuesRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.export_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ExportFeatureValuesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_export_feature_values_async_from_dict(): - await test_export_feature_values_async(request_type=dict) - - -def test_export_feature_values_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = featurestore_service.ExportFeatureValuesRequest() - - request.entity_type = 'entity_type/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_feature_values), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.export_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'entity_type=entity_type/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_export_feature_values_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.ExportFeatureValuesRequest() - - request.entity_type = 'entity_type/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_feature_values), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.export_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'entity_type=entity_type/value', - ) in kw['metadata'] - - -def test_export_feature_values_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.export_feature_values( - entity_type='entity_type_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].entity_type - mock_val = 'entity_type_value' - assert arg == mock_val - - -def test_export_feature_values_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.export_feature_values( - featurestore_service.ExportFeatureValuesRequest(), - entity_type='entity_type_value', - ) - - -@pytest.mark.asyncio -async def test_export_feature_values_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.export_feature_values( - entity_type='entity_type_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].entity_type - mock_val = 'entity_type_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_export_feature_values_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.export_feature_values( - featurestore_service.ExportFeatureValuesRequest(), - entity_type='entity_type_value', - ) - - -def test_search_features(transport: str = 'grpc', request_type=featurestore_service.SearchFeaturesRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.SearchFeaturesResponse( - next_page_token='next_page_token_value', - ) - response = client.search_features(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.SearchFeaturesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.SearchFeaturesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_search_features_from_dict(): - test_search_features(request_type=dict) - - -def test_search_features_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_features), - '__call__') as call: - client.search_features() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.SearchFeaturesRequest() - - -@pytest.mark.asyncio -async def test_search_features_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.SearchFeaturesRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.SearchFeaturesResponse( - next_page_token='next_page_token_value', - )) - response = await client.search_features(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.SearchFeaturesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.SearchFeaturesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_search_features_async_from_dict(): - await test_search_features_async(request_type=dict) - - -def test_search_features_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.SearchFeaturesRequest() - - request.location = 'location/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_features), - '__call__') as call: - call.return_value = featurestore_service.SearchFeaturesResponse() - client.search_features(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'location=location/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_search_features_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.SearchFeaturesRequest() - - request.location = 'location/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.search_features), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.SearchFeaturesResponse()) - await client.search_features(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'location=location/value', - ) in kw['metadata'] - - -def test_search_features_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.SearchFeaturesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.search_features( - location='location_value', - query='query_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].location - mock_val = 'location_value' - assert arg == mock_val - arg = args[0].query - mock_val = 'query_value' - assert arg == mock_val - - -def test_search_features_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.search_features( - featurestore_service.SearchFeaturesRequest(), - location='location_value', - query='query_value', - ) - - -@pytest.mark.asyncio -async def test_search_features_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.SearchFeaturesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.SearchFeaturesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.search_features( - location='location_value', - query='query_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].location - mock_val = 'location_value' - assert arg == mock_val - arg = args[0].query - mock_val = 'query_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_search_features_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.search_features( - featurestore_service.SearchFeaturesRequest(), - location='location_value', - query='query_value', - ) - - -def test_search_features_pager(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.search_features), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - feature.Feature(), - ], - next_page_token='abc', - ), - featurestore_service.SearchFeaturesResponse( - features=[], - next_page_token='def', - ), - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - ], - next_page_token='ghi', - ), - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('location', ''), - )), - ) - pager = client.search_features(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, feature.Feature) - for i in results) - -def test_search_features_pages(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_features), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - feature.Feature(), - ], - next_page_token='abc', - ), - featurestore_service.SearchFeaturesResponse( - features=[], - next_page_token='def', - ), - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - ], - next_page_token='ghi', - ), - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - ], - ), - RuntimeError, - ) - pages = list(client.search_features(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_search_features_async_pager(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_features), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - feature.Feature(), - ], - next_page_token='abc', - ), - featurestore_service.SearchFeaturesResponse( - features=[], - next_page_token='def', - ), - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - ], - next_page_token='ghi', - ), - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - ], - ), - RuntimeError, - ) - async_pager = await client.search_features(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, feature.Feature) - for i in responses) - -@pytest.mark.asyncio -async def test_search_features_async_pages(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_features), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - feature.Feature(), - ], - next_page_token='abc', - ), - featurestore_service.SearchFeaturesResponse( - features=[], - next_page_token='def', - ), - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - ], - next_page_token='ghi', - ), - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.search_features(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.FeaturestoreServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.FeaturestoreServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = FeaturestoreServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.FeaturestoreServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = FeaturestoreServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. 
- transport = transports.FeaturestoreServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = FeaturestoreServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.FeaturestoreServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.FeaturestoreServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.FeaturestoreServiceGrpcTransport, - transports.FeaturestoreServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.FeaturestoreServiceGrpcTransport, - ) - -def test_featurestore_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.FeaturestoreServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_featurestore_service_base_transport(): - # Instantiate the base transport. 
- with mock.patch('google.cloud.aiplatform_v1.services.featurestore_service.transports.FeaturestoreServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.FeaturestoreServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'create_featurestore', - 'get_featurestore', - 'list_featurestores', - 'update_featurestore', - 'delete_featurestore', - 'create_entity_type', - 'get_entity_type', - 'list_entity_types', - 'update_entity_type', - 'delete_entity_type', - 'create_feature', - 'batch_create_features', - 'get_feature', - 'list_features', - 'update_feature', - 'delete_feature', - 'import_feature_values', - 'batch_read_feature_values', - 'export_feature_values', - 'search_features', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -def test_featurestore_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.featurestore_service.transports.FeaturestoreServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.FeaturestoreServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - 
-def test_featurestore_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.featurestore_service.transports.FeaturestoreServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.FeaturestoreServiceTransport() - adc.assert_called_once() - - -def test_featurestore_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - FeaturestoreServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.FeaturestoreServiceGrpcTransport, - transports.FeaturestoreServiceGrpcAsyncIOTransport, - ], -) -def test_featurestore_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.FeaturestoreServiceGrpcTransport, grpc_helpers), - (transports.FeaturestoreServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_featurestore_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.FeaturestoreServiceGrpcTransport, transports.FeaturestoreServiceGrpcAsyncIOTransport]) -def test_featurestore_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
- with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_featurestore_service_host_no_port(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_featurestore_service_host_with_port(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_featurestore_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.FeaturestoreServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_featurestore_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.FeaturestoreServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.FeaturestoreServiceGrpcTransport, transports.FeaturestoreServiceGrpcAsyncIOTransport]) -def test_featurestore_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - 
"mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.FeaturestoreServiceGrpcTransport, transports.FeaturestoreServiceGrpcAsyncIOTransport]) -def test_featurestore_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_featurestore_service_grpc_lro_client(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_featurestore_service_grpc_lro_async_client(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_entity_type_path(): - project = "squid" - location = "clam" - featurestore = "whelk" - entity_type = "octopus" - expected = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, ) - actual = FeaturestoreServiceClient.entity_type_path(project, location, featurestore, entity_type) - assert expected == actual - - -def test_parse_entity_type_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - "featurestore": "cuttlefish", - "entity_type": "mussel", - } - path = FeaturestoreServiceClient.entity_type_path(**expected) - - # Check that the path construction is reversible. 
- actual = FeaturestoreServiceClient.parse_entity_type_path(path) - assert expected == actual - -def test_feature_path(): - project = "winkle" - location = "nautilus" - featurestore = "scallop" - entity_type = "abalone" - feature = "squid" - expected = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, feature=feature, ) - actual = FeaturestoreServiceClient.feature_path(project, location, featurestore, entity_type, feature) - assert expected == actual - - -def test_parse_feature_path(): - expected = { - "project": "clam", - "location": "whelk", - "featurestore": "octopus", - "entity_type": "oyster", - "feature": "nudibranch", - } - path = FeaturestoreServiceClient.feature_path(**expected) - - # Check that the path construction is reversible. - actual = FeaturestoreServiceClient.parse_feature_path(path) - assert expected == actual - -def test_featurestore_path(): - project = "cuttlefish" - location = "mussel" - featurestore = "winkle" - expected = "projects/{project}/locations/{location}/featurestores/{featurestore}".format(project=project, location=location, featurestore=featurestore, ) - actual = FeaturestoreServiceClient.featurestore_path(project, location, featurestore) - assert expected == actual - - -def test_parse_featurestore_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "featurestore": "abalone", - } - path = FeaturestoreServiceClient.featurestore_path(**expected) - - # Check that the path construction is reversible. 
- actual = FeaturestoreServiceClient.parse_featurestore_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "squid" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = FeaturestoreServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "clam", - } - path = FeaturestoreServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = FeaturestoreServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "whelk" - expected = "folders/{folder}".format(folder=folder, ) - actual = FeaturestoreServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "octopus", - } - path = FeaturestoreServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = FeaturestoreServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "oyster" - expected = "organizations/{organization}".format(organization=organization, ) - actual = FeaturestoreServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nudibranch", - } - path = FeaturestoreServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. 
- actual = FeaturestoreServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "cuttlefish" - expected = "projects/{project}".format(project=project, ) - actual = FeaturestoreServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "mussel", - } - path = FeaturestoreServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = FeaturestoreServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "winkle" - location = "nautilus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = FeaturestoreServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "scallop", - "location": "abalone", - } - path = FeaturestoreServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = FeaturestoreServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.FeaturestoreServiceTransport, '_prep_wrapped_messages') as prep: - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.FeaturestoreServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = FeaturestoreServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py deleted file mode 100644 index e1024f9d06..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py +++ /dev/null @@ -1,3138 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1.services.index_endpoint_service import IndexEndpointServiceAsyncClient -from google.cloud.aiplatform_v1.services.index_endpoint_service import IndexEndpointServiceClient -from google.cloud.aiplatform_v1.services.index_endpoint_service import pagers -from google.cloud.aiplatform_v1.services.index_endpoint_service import transports -from google.cloud.aiplatform_v1.types import index_endpoint -from google.cloud.aiplatform_v1.types import index_endpoint as gca_index_endpoint -from google.cloud.aiplatform_v1.types import index_endpoint_service -from google.cloud.aiplatform_v1.types import machine_resources -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert IndexEndpointServiceClient._get_default_mtls_endpoint(None) is None - assert IndexEndpointServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert IndexEndpointServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert IndexEndpointServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert IndexEndpointServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert IndexEndpointServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - IndexEndpointServiceClient, - IndexEndpointServiceAsyncClient, -]) -def test_index_endpoint_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.IndexEndpointServiceGrpcTransport, "grpc"), - (transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_index_endpoint_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 
'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - IndexEndpointServiceClient, - IndexEndpointServiceAsyncClient, -]) -def test_index_endpoint_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_index_endpoint_service_client_get_transport_class(): - transport = IndexEndpointServiceClient.get_transport_class() - available_transports = [ - transports.IndexEndpointServiceGrpcTransport, - ] - assert transport in available_transports - - transport = IndexEndpointServiceClient.get_transport_class("grpc") - assert transport == transports.IndexEndpointServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc"), - (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(IndexEndpointServiceClient, 
"DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceClient)) -@mock.patch.object(IndexEndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceAsyncClient)) -def test_index_endpoint_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(IndexEndpointServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(IndexEndpointServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc", "true"), - (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc", "false"), - (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(IndexEndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceClient)) -@mock.patch.object(IndexEndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_index_endpoint_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc"), - (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_index_endpoint_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc"), - (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_index_endpoint_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_index_endpoint_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1.services.index_endpoint_service.transports.IndexEndpointServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = IndexEndpointServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_create_index_endpoint(transport: str = 'grpc', request_type=index_endpoint_service.CreateIndexEndpointRequest): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.CreateIndexEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_index_endpoint_from_dict(): - test_create_index_endpoint(request_type=dict) - - -def test_create_index_endpoint_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index_endpoint), - '__call__') as call: - client.create_index_endpoint() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.CreateIndexEndpointRequest() - - -@pytest.mark.asyncio -async def test_create_index_endpoint_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.CreateIndexEndpointRequest): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.CreateIndexEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_index_endpoint_async_from_dict(): - await test_create_index_endpoint_async(request_type=dict) - - -def test_create_index_endpoint_field_headers(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.CreateIndexEndpointRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index_endpoint), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_index_endpoint_field_headers_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = index_endpoint_service.CreateIndexEndpointRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index_endpoint), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_index_endpoint_flattened(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_index_endpoint( - parent='parent_value', - index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].index_endpoint - mock_val = gca_index_endpoint.IndexEndpoint(name='name_value') - assert arg == mock_val - - -def test_create_index_endpoint_flattened_error(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_index_endpoint( - index_endpoint_service.CreateIndexEndpointRequest(), - parent='parent_value', - index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_index_endpoint_flattened_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_index_endpoint( - parent='parent_value', - index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].index_endpoint - mock_val = gca_index_endpoint.IndexEndpoint(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_index_endpoint_flattened_error_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_index_endpoint( - index_endpoint_service.CreateIndexEndpointRequest(), - parent='parent_value', - index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), - ) - - -def test_get_index_endpoint(transport: str = 'grpc', request_type=index_endpoint_service.GetIndexEndpointRequest): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = index_endpoint.IndexEndpoint( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - network='network_value', - enable_private_service_connect=True, - ) - response = client.get_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.GetIndexEndpointRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, index_endpoint.IndexEndpoint) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.network == 'network_value' - assert response.enable_private_service_connect is True - - -def test_get_index_endpoint_from_dict(): - test_get_index_endpoint(request_type=dict) - - -def test_get_index_endpoint_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index_endpoint), - '__call__') as call: - client.get_index_endpoint() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.GetIndexEndpointRequest() - - -@pytest.mark.asyncio -async def test_get_index_endpoint_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.GetIndexEndpointRequest): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint.IndexEndpoint( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - network='network_value', - enable_private_service_connect=True, - )) - response = await client.get_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.GetIndexEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, index_endpoint.IndexEndpoint) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.network == 'network_value' - assert response.enable_private_service_connect is True - - -@pytest.mark.asyncio -async def test_get_index_endpoint_async_from_dict(): - await test_get_index_endpoint_async(request_type=dict) - - -def test_get_index_endpoint_field_headers(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.GetIndexEndpointRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index_endpoint), - '__call__') as call: - call.return_value = index_endpoint.IndexEndpoint() - client.get_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_index_endpoint_field_headers_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.GetIndexEndpointRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index_endpoint), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint.IndexEndpoint()) - await client.get_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_index_endpoint_flattened(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = index_endpoint.IndexEndpoint() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_index_endpoint( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_index_endpoint_flattened_error(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_index_endpoint( - index_endpoint_service.GetIndexEndpointRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_index_endpoint_flattened_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = index_endpoint.IndexEndpoint() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint.IndexEndpoint()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_index_endpoint( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_index_endpoint_flattened_error_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_index_endpoint( - index_endpoint_service.GetIndexEndpointRequest(), - name='name_value', - ) - - -def test_list_index_endpoints(transport: str = 'grpc', request_type=index_endpoint_service.ListIndexEndpointsRequest): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_index_endpoints), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = index_endpoint_service.ListIndexEndpointsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_index_endpoints(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.ListIndexEndpointsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListIndexEndpointsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_index_endpoints_from_dict(): - test_list_index_endpoints(request_type=dict) - - -def test_list_index_endpoints_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_index_endpoints), - '__call__') as call: - client.list_index_endpoints() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.ListIndexEndpointsRequest() - - -@pytest.mark.asyncio -async def test_list_index_endpoints_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.ListIndexEndpointsRequest): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_index_endpoints), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint_service.ListIndexEndpointsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_index_endpoints(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.ListIndexEndpointsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListIndexEndpointsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_index_endpoints_async_from_dict(): - await test_list_index_endpoints_async(request_type=dict) - - -def test_list_index_endpoints_field_headers(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = index_endpoint_service.ListIndexEndpointsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_index_endpoints), - '__call__') as call: - call.return_value = index_endpoint_service.ListIndexEndpointsResponse() - client.list_index_endpoints(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_index_endpoints_field_headers_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.ListIndexEndpointsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_index_endpoints), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint_service.ListIndexEndpointsResponse()) - await client.list_index_endpoints(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_index_endpoints_flattened(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_index_endpoints), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = index_endpoint_service.ListIndexEndpointsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_index_endpoints( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_index_endpoints_flattened_error(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_index_endpoints( - index_endpoint_service.ListIndexEndpointsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_index_endpoints_flattened_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_index_endpoints), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = index_endpoint_service.ListIndexEndpointsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint_service.ListIndexEndpointsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_index_endpoints( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_index_endpoints_flattened_error_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_index_endpoints( - index_endpoint_service.ListIndexEndpointsRequest(), - parent='parent_value', - ) - - -def test_list_index_endpoints_pager(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_index_endpoints), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[ - index_endpoint.IndexEndpoint(), - index_endpoint.IndexEndpoint(), - index_endpoint.IndexEndpoint(), - ], - next_page_token='abc', - ), - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[], - next_page_token='def', - ), - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[ - index_endpoint.IndexEndpoint(), - ], - next_page_token='ghi', - ), - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[ - index_endpoint.IndexEndpoint(), - index_endpoint.IndexEndpoint(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_index_endpoints(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, index_endpoint.IndexEndpoint) - for i in results) - -def test_list_index_endpoints_pages(): - client = 
IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_index_endpoints), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[ - index_endpoint.IndexEndpoint(), - index_endpoint.IndexEndpoint(), - index_endpoint.IndexEndpoint(), - ], - next_page_token='abc', - ), - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[], - next_page_token='def', - ), - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[ - index_endpoint.IndexEndpoint(), - ], - next_page_token='ghi', - ), - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[ - index_endpoint.IndexEndpoint(), - index_endpoint.IndexEndpoint(), - ], - ), - RuntimeError, - ) - pages = list(client.list_index_endpoints(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_index_endpoints_async_pager(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_index_endpoints), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[ - index_endpoint.IndexEndpoint(), - index_endpoint.IndexEndpoint(), - index_endpoint.IndexEndpoint(), - ], - next_page_token='abc', - ), - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[], - next_page_token='def', - ), - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[ - index_endpoint.IndexEndpoint(), - ], - next_page_token='ghi', - ), - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[ - index_endpoint.IndexEndpoint(), - index_endpoint.IndexEndpoint(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_index_endpoints(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, index_endpoint.IndexEndpoint) - for i in responses) - -@pytest.mark.asyncio -async def test_list_index_endpoints_async_pages(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_index_endpoints), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[ - index_endpoint.IndexEndpoint(), - index_endpoint.IndexEndpoint(), - index_endpoint.IndexEndpoint(), - ], - next_page_token='abc', - ), - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[], - next_page_token='def', - ), - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[ - index_endpoint.IndexEndpoint(), - ], - next_page_token='ghi', - ), - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[ - index_endpoint.IndexEndpoint(), - index_endpoint.IndexEndpoint(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_index_endpoints(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_index_endpoint(transport: str = 'grpc', request_type=index_endpoint_service.UpdateIndexEndpointRequest): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_index_endpoint.IndexEndpoint( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - network='network_value', - enable_private_service_connect=True, - ) - response = client.update_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.UpdateIndexEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_index_endpoint.IndexEndpoint) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.network == 'network_value' - assert response.enable_private_service_connect is True - - -def test_update_index_endpoint_from_dict(): - test_update_index_endpoint(request_type=dict) - - -def test_update_index_endpoint_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index_endpoint), - '__call__') as call: - client.update_index_endpoint() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.UpdateIndexEndpointRequest() - - -@pytest.mark.asyncio -async def test_update_index_endpoint_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.UpdateIndexEndpointRequest): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_index_endpoint.IndexEndpoint( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - network='network_value', - enable_private_service_connect=True, - )) - response = await client.update_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.UpdateIndexEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_index_endpoint.IndexEndpoint) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.network == 'network_value' - assert response.enable_private_service_connect is True - - -@pytest.mark.asyncio -async def test_update_index_endpoint_async_from_dict(): - await test_update_index_endpoint_async(request_type=dict) - - -def test_update_index_endpoint_field_headers(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.UpdateIndexEndpointRequest() - - request.index_endpoint.name = 'index_endpoint.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index_endpoint), - '__call__') as call: - call.return_value = gca_index_endpoint.IndexEndpoint() - client.update_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'index_endpoint.name=index_endpoint.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_index_endpoint_field_headers_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.UpdateIndexEndpointRequest() - - request.index_endpoint.name = 'index_endpoint.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index_endpoint), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_index_endpoint.IndexEndpoint()) - await client.update_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'index_endpoint.name=index_endpoint.name/value', - ) in kw['metadata'] - - -def test_update_index_endpoint_flattened(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_index_endpoint.IndexEndpoint() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.update_index_endpoint( - index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].index_endpoint - mock_val = gca_index_endpoint.IndexEndpoint(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_index_endpoint_flattened_error(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_index_endpoint( - index_endpoint_service.UpdateIndexEndpointRequest(), - index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_index_endpoint_flattened_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_index_endpoint.IndexEndpoint() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_index_endpoint.IndexEndpoint()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.update_index_endpoint( - index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].index_endpoint - mock_val = gca_index_endpoint.IndexEndpoint(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_index_endpoint_flattened_error_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_index_endpoint( - index_endpoint_service.UpdateIndexEndpointRequest(), - index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_index_endpoint(transport: str = 'grpc', request_type=index_endpoint_service.DeleteIndexEndpointRequest): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.DeleteIndexEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_index_endpoint_from_dict(): - test_delete_index_endpoint(request_type=dict) - - -def test_delete_index_endpoint_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index_endpoint), - '__call__') as call: - client.delete_index_endpoint() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.DeleteIndexEndpointRequest() - - -@pytest.mark.asyncio -async def test_delete_index_endpoint_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.DeleteIndexEndpointRequest): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.DeleteIndexEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_index_endpoint_async_from_dict(): - await test_delete_index_endpoint_async(request_type=dict) - - -def test_delete_index_endpoint_field_headers(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.DeleteIndexEndpointRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index_endpoint), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_index_endpoint_field_headers_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.DeleteIndexEndpointRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_index_endpoint), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_index_endpoint_flattened(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_index_endpoint( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_index_endpoint_flattened_error(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.delete_index_endpoint( - index_endpoint_service.DeleteIndexEndpointRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_index_endpoint_flattened_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_index_endpoint( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_index_endpoint_flattened_error_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_index_endpoint( - index_endpoint_service.DeleteIndexEndpointRequest(), - name='name_value', - ) - - -def test_deploy_index(transport: str = 'grpc', request_type=index_endpoint_service.DeployIndexRequest): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.deploy_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.DeployIndexRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_deploy_index_from_dict(): - test_deploy_index(request_type=dict) - - -def test_deploy_index_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_index), - '__call__') as call: - client.deploy_index() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.DeployIndexRequest() - - -@pytest.mark.asyncio -async def test_deploy_index_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.DeployIndexRequest): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.deploy_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.deploy_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.DeployIndexRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_deploy_index_async_from_dict(): - await test_deploy_index_async(request_type=dict) - - -def test_deploy_index_field_headers(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.DeployIndexRequest() - - request.index_endpoint = 'index_endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_index), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.deploy_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'index_endpoint=index_endpoint/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_deploy_index_field_headers_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = index_endpoint_service.DeployIndexRequest() - - request.index_endpoint = 'index_endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_index), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.deploy_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'index_endpoint=index_endpoint/value', - ) in kw['metadata'] - - -def test_deploy_index_flattened(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.deploy_index( - index_endpoint='index_endpoint_value', - deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].index_endpoint - mock_val = 'index_endpoint_value' - assert arg == mock_val - arg = args[0].deployed_index - mock_val = gca_index_endpoint.DeployedIndex(id='id_value') - assert arg == mock_val - - -def test_deploy_index_flattened_error(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.deploy_index( - index_endpoint_service.DeployIndexRequest(), - index_endpoint='index_endpoint_value', - deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), - ) - - -@pytest.mark.asyncio -async def test_deploy_index_flattened_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.deploy_index( - index_endpoint='index_endpoint_value', - deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].index_endpoint - mock_val = 'index_endpoint_value' - assert arg == mock_val - arg = args[0].deployed_index - mock_val = gca_index_endpoint.DeployedIndex(id='id_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_deploy_index_flattened_error_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.deploy_index( - index_endpoint_service.DeployIndexRequest(), - index_endpoint='index_endpoint_value', - deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), - ) - - -def test_undeploy_index(transport: str = 'grpc', request_type=index_endpoint_service.UndeployIndexRequest): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.undeploy_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.UndeployIndexRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_undeploy_index_from_dict(): - test_undeploy_index(request_type=dict) - - -def test_undeploy_index_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_index), - '__call__') as call: - client.undeploy_index() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.UndeployIndexRequest() - - -@pytest.mark.asyncio -async def test_undeploy_index_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.UndeployIndexRequest): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.undeploy_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.UndeployIndexRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_undeploy_index_async_from_dict(): - await test_undeploy_index_async(request_type=dict) - - -def test_undeploy_index_field_headers(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.UndeployIndexRequest() - - request.index_endpoint = 'index_endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_index), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.undeploy_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'index_endpoint=index_endpoint/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_undeploy_index_field_headers_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.UndeployIndexRequest() - - request.index_endpoint = 'index_endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_index), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.undeploy_index(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'index_endpoint=index_endpoint/value', - ) in kw['metadata'] - - -def test_undeploy_index_flattened(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.undeploy_index( - index_endpoint='index_endpoint_value', - deployed_index_id='deployed_index_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].index_endpoint - mock_val = 'index_endpoint_value' - assert arg == mock_val - arg = args[0].deployed_index_id - mock_val = 'deployed_index_id_value' - assert arg == mock_val - - -def test_undeploy_index_flattened_error(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.undeploy_index( - index_endpoint_service.UndeployIndexRequest(), - index_endpoint='index_endpoint_value', - deployed_index_id='deployed_index_id_value', - ) - - -@pytest.mark.asyncio -async def test_undeploy_index_flattened_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.undeploy_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.undeploy_index( - index_endpoint='index_endpoint_value', - deployed_index_id='deployed_index_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].index_endpoint - mock_val = 'index_endpoint_value' - assert arg == mock_val - arg = args[0].deployed_index_id - mock_val = 'deployed_index_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_undeploy_index_flattened_error_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.undeploy_index( - index_endpoint_service.UndeployIndexRequest(), - index_endpoint='index_endpoint_value', - deployed_index_id='deployed_index_id_value', - ) - - -def test_mutate_deployed_index(transport: str = 'grpc', request_type=index_endpoint_service.MutateDeployedIndexRequest): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.mutate_deployed_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.mutate_deployed_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.MutateDeployedIndexRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_mutate_deployed_index_from_dict(): - test_mutate_deployed_index(request_type=dict) - - -def test_mutate_deployed_index_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.mutate_deployed_index), - '__call__') as call: - client.mutate_deployed_index() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.MutateDeployedIndexRequest() - - -@pytest.mark.asyncio -async def test_mutate_deployed_index_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.MutateDeployedIndexRequest): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.mutate_deployed_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.mutate_deployed_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.MutateDeployedIndexRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_mutate_deployed_index_async_from_dict(): - await test_mutate_deployed_index_async(request_type=dict) - - -def test_mutate_deployed_index_field_headers(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.MutateDeployedIndexRequest() - - request.index_endpoint = 'index_endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.mutate_deployed_index), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.mutate_deployed_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'index_endpoint=index_endpoint/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_mutate_deployed_index_field_headers_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.MutateDeployedIndexRequest() - - request.index_endpoint = 'index_endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.mutate_deployed_index), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.mutate_deployed_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'index_endpoint=index_endpoint/value', - ) in kw['metadata'] - - -def test_mutate_deployed_index_flattened(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.mutate_deployed_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.mutate_deployed_index( - index_endpoint='index_endpoint_value', - deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].index_endpoint - mock_val = 'index_endpoint_value' - assert arg == mock_val - arg = args[0].deployed_index - mock_val = gca_index_endpoint.DeployedIndex(id='id_value') - assert arg == mock_val - - -def test_mutate_deployed_index_flattened_error(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.mutate_deployed_index( - index_endpoint_service.MutateDeployedIndexRequest(), - index_endpoint='index_endpoint_value', - deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), - ) - - -@pytest.mark.asyncio -async def test_mutate_deployed_index_flattened_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.mutate_deployed_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.mutate_deployed_index( - index_endpoint='index_endpoint_value', - deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].index_endpoint - mock_val = 'index_endpoint_value' - assert arg == mock_val - arg = args[0].deployed_index - mock_val = gca_index_endpoint.DeployedIndex(id='id_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_mutate_deployed_index_flattened_error_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.mutate_deployed_index( - index_endpoint_service.MutateDeployedIndexRequest(), - index_endpoint='index_endpoint_value', - deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.IndexEndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.IndexEndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = IndexEndpointServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.IndexEndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = IndexEndpointServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. 
- transport = transports.IndexEndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = IndexEndpointServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.IndexEndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.IndexEndpointServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.IndexEndpointServiceGrpcTransport, - transports.IndexEndpointServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.IndexEndpointServiceGrpcTransport, - ) - -def test_index_endpoint_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.IndexEndpointServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_index_endpoint_service_base_transport(): - # Instantiate the base transport. 
- with mock.patch('google.cloud.aiplatform_v1.services.index_endpoint_service.transports.IndexEndpointServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.IndexEndpointServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'create_index_endpoint', - 'get_index_endpoint', - 'list_index_endpoints', - 'update_index_endpoint', - 'delete_index_endpoint', - 'deploy_index', - 'undeploy_index', - 'mutate_deployed_index', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -def test_index_endpoint_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.index_endpoint_service.transports.IndexEndpointServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.IndexEndpointServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_index_endpoint_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.index_endpoint_service.transports.IndexEndpointServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.IndexEndpointServiceTransport() - adc.assert_called_once() - - -def test_index_endpoint_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - IndexEndpointServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.IndexEndpointServiceGrpcTransport, - transports.IndexEndpointServiceGrpcAsyncIOTransport, - ], -) -def test_index_endpoint_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.IndexEndpointServiceGrpcTransport, grpc_helpers), - (transports.IndexEndpointServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_index_endpoint_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.IndexEndpointServiceGrpcTransport, transports.IndexEndpointServiceGrpcAsyncIOTransport]) -def test_index_endpoint_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. 
- with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_index_endpoint_service_host_no_port(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_index_endpoint_service_host_with_port(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_index_endpoint_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.IndexEndpointServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_index_endpoint_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.IndexEndpointServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.IndexEndpointServiceGrpcTransport, transports.IndexEndpointServiceGrpcAsyncIOTransport]) -def test_index_endpoint_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are 
-# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.IndexEndpointServiceGrpcTransport, transports.IndexEndpointServiceGrpcAsyncIOTransport]) -def test_index_endpoint_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_index_endpoint_service_grpc_lro_client(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. 
- assert transport.operations_client is transport.operations_client - - -def test_index_endpoint_service_grpc_lro_async_client(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_index_path(): - project = "squid" - location = "clam" - index = "whelk" - expected = "projects/{project}/locations/{location}/indexes/{index}".format(project=project, location=location, index=index, ) - actual = IndexEndpointServiceClient.index_path(project, location, index) - assert expected == actual - - -def test_parse_index_path(): - expected = { - "project": "octopus", - "location": "oyster", - "index": "nudibranch", - } - path = IndexEndpointServiceClient.index_path(**expected) - - # Check that the path construction is reversible. - actual = IndexEndpointServiceClient.parse_index_path(path) - assert expected == actual - -def test_index_endpoint_path(): - project = "cuttlefish" - location = "mussel" - index_endpoint = "winkle" - expected = "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format(project=project, location=location, index_endpoint=index_endpoint, ) - actual = IndexEndpointServiceClient.index_endpoint_path(project, location, index_endpoint) - assert expected == actual - - -def test_parse_index_endpoint_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "index_endpoint": "abalone", - } - path = IndexEndpointServiceClient.index_endpoint_path(**expected) - - # Check that the path construction is reversible. 
- actual = IndexEndpointServiceClient.parse_index_endpoint_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "squid" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = IndexEndpointServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "clam", - } - path = IndexEndpointServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = IndexEndpointServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "whelk" - expected = "folders/{folder}".format(folder=folder, ) - actual = IndexEndpointServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "octopus", - } - path = IndexEndpointServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = IndexEndpointServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "oyster" - expected = "organizations/{organization}".format(organization=organization, ) - actual = IndexEndpointServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nudibranch", - } - path = IndexEndpointServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. 
- actual = IndexEndpointServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "cuttlefish" - expected = "projects/{project}".format(project=project, ) - actual = IndexEndpointServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "mussel", - } - path = IndexEndpointServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = IndexEndpointServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "winkle" - location = "nautilus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = IndexEndpointServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "scallop", - "location": "abalone", - } - path = IndexEndpointServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = IndexEndpointServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.IndexEndpointServiceTransport, '_prep_wrapped_messages') as prep: - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.IndexEndpointServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = IndexEndpointServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_index_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_index_service.py deleted file mode 100644 index 36b29a2d23..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_index_service.py +++ /dev/null @@ -1,2381 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1.services.index_service import IndexServiceAsyncClient -from google.cloud.aiplatform_v1.services.index_service import IndexServiceClient -from google.cloud.aiplatform_v1.services.index_service import pagers -from google.cloud.aiplatform_v1.services.index_service import transports -from google.cloud.aiplatform_v1.types import deployed_index_ref -from google.cloud.aiplatform_v1.types import index -from google.cloud.aiplatform_v1.types import index as gca_index -from google.cloud.aiplatform_v1.types import index_service -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert IndexServiceClient._get_default_mtls_endpoint(None) is None - assert IndexServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert IndexServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert IndexServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert IndexServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert IndexServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - IndexServiceClient, - IndexServiceAsyncClient, -]) -def test_index_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.IndexServiceGrpcTransport, "grpc"), - (transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_index_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, 
None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - IndexServiceClient, - IndexServiceAsyncClient, -]) -def test_index_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_index_service_client_get_transport_class(): - transport = IndexServiceClient.get_transport_class() - available_transports = [ - transports.IndexServiceGrpcTransport, - ] - assert transport in available_transports - - transport = IndexServiceClient.get_transport_class("grpc") - assert transport == transports.IndexServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc"), - (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(IndexServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceClient)) -@mock.patch.object(IndexServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceAsyncClient)) -def 
test_index_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(IndexServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(IndexServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc", "true"), - (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc", "false"), - (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, 
"grpc_asyncio", "false"), -]) -@mock.patch.object(IndexServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceClient)) -@mock.patch.object(IndexServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_index_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc"), - (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_index_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc"), - (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_index_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_index_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1.services.index_service.transports.IndexServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = IndexServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_create_index(transport: str = 'grpc', request_type=index_service.CreateIndexRequest): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_index(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.CreateIndexRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_index_from_dict(): - test_create_index(request_type=dict) - - -def test_create_index_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index), - '__call__') as call: - client.create_index() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.CreateIndexRequest() - - -@pytest.mark.asyncio -async def test_create_index_async(transport: str = 'grpc_asyncio', request_type=index_service.CreateIndexRequest): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.CreateIndexRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_index_async_from_dict(): - await test_create_index_async(request_type=dict) - - -def test_create_index_field_headers(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_service.CreateIndexRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_index_field_headers_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_service.CreateIndexRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_index_flattened(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_index( - parent='parent_value', - index=gca_index.Index(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].index - mock_val = gca_index.Index(name='name_value') - assert arg == mock_val - - -def test_create_index_flattened_error(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_index( - index_service.CreateIndexRequest(), - parent='parent_value', - index=gca_index.Index(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_index_flattened_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_index( - parent='parent_value', - index=gca_index.Index(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].index - mock_val = gca_index.Index(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_index_flattened_error_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_index( - index_service.CreateIndexRequest(), - parent='parent_value', - index=gca_index.Index(name='name_value'), - ) - - -def test_get_index(transport: str = 'grpc', request_type=index_service.GetIndexRequest): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = index.Index( - name='name_value', - display_name='display_name_value', - description='description_value', - metadata_schema_uri='metadata_schema_uri_value', - etag='etag_value', - ) - response = client.get_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.GetIndexRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, index.Index) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.etag == 'etag_value' - - -def test_get_index_from_dict(): - test_get_index(request_type=dict) - - -def test_get_index_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index), - '__call__') as call: - client.get_index() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.GetIndexRequest() - - -@pytest.mark.asyncio -async def test_get_index_async(transport: str = 'grpc_asyncio', request_type=index_service.GetIndexRequest): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(index.Index( - name='name_value', - display_name='display_name_value', - description='description_value', - metadata_schema_uri='metadata_schema_uri_value', - etag='etag_value', - )) - response = await client.get_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.GetIndexRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, index.Index) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_index_async_from_dict(): - await test_get_index_async(request_type=dict) - - -def test_get_index_field_headers(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_service.GetIndexRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index), - '__call__') as call: - call.return_value = index.Index() - client.get_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_index_field_headers_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_service.GetIndexRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index.Index()) - await client.get_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_index_flattened(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = index.Index() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_index( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_index_flattened_error(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_index( - index_service.GetIndexRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_index_flattened_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = index.Index() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index.Index()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_index( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_index_flattened_error_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_index( - index_service.GetIndexRequest(), - name='name_value', - ) - - -def test_list_indexes(transport: str = 'grpc', request_type=index_service.ListIndexesRequest): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_indexes), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = index_service.ListIndexesResponse( - next_page_token='next_page_token_value', - ) - response = client.list_indexes(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.ListIndexesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListIndexesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_indexes_from_dict(): - test_list_indexes(request_type=dict) - - -def test_list_indexes_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_indexes), - '__call__') as call: - client.list_indexes() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.ListIndexesRequest() - - -@pytest.mark.asyncio -async def test_list_indexes_async(transport: str = 'grpc_asyncio', request_type=index_service.ListIndexesRequest): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_indexes), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(index_service.ListIndexesResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_indexes(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.ListIndexesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListIndexesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_indexes_async_from_dict(): - await test_list_indexes_async(request_type=dict) - - -def test_list_indexes_field_headers(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_service.ListIndexesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_indexes), - '__call__') as call: - call.return_value = index_service.ListIndexesResponse() - client.list_indexes(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_indexes_field_headers_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_service.ListIndexesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_indexes), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_service.ListIndexesResponse()) - await client.list_indexes(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_indexes_flattened(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_indexes), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = index_service.ListIndexesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_indexes( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_indexes_flattened_error(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_indexes( - index_service.ListIndexesRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_indexes_flattened_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_indexes), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = index_service.ListIndexesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_service.ListIndexesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_indexes( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_indexes_flattened_error_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_indexes( - index_service.ListIndexesRequest(), - parent='parent_value', - ) - - -def test_list_indexes_pager(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_indexes), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - index_service.ListIndexesResponse( - indexes=[ - index.Index(), - index.Index(), - index.Index(), - ], - next_page_token='abc', - ), - index_service.ListIndexesResponse( - indexes=[], - next_page_token='def', - ), - index_service.ListIndexesResponse( - indexes=[ - index.Index(), - ], - next_page_token='ghi', - ), - index_service.ListIndexesResponse( - indexes=[ - index.Index(), - index.Index(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_indexes(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, index.Index) - for i in results) - -def test_list_indexes_pages(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_indexes), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - index_service.ListIndexesResponse( - indexes=[ - index.Index(), - index.Index(), - index.Index(), - ], - next_page_token='abc', - ), - index_service.ListIndexesResponse( - indexes=[], - next_page_token='def', - ), - index_service.ListIndexesResponse( - indexes=[ - index.Index(), - ], - next_page_token='ghi', - ), - index_service.ListIndexesResponse( - indexes=[ - index.Index(), - index.Index(), - ], - ), - RuntimeError, - ) - pages = list(client.list_indexes(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_indexes_async_pager(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_indexes), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - index_service.ListIndexesResponse( - indexes=[ - index.Index(), - index.Index(), - index.Index(), - ], - next_page_token='abc', - ), - index_service.ListIndexesResponse( - indexes=[], - next_page_token='def', - ), - index_service.ListIndexesResponse( - indexes=[ - index.Index(), - ], - next_page_token='ghi', - ), - index_service.ListIndexesResponse( - indexes=[ - index.Index(), - index.Index(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_indexes(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, index.Index) - for i in responses) - -@pytest.mark.asyncio -async def test_list_indexes_async_pages(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_indexes), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - index_service.ListIndexesResponse( - indexes=[ - index.Index(), - index.Index(), - index.Index(), - ], - next_page_token='abc', - ), - index_service.ListIndexesResponse( - indexes=[], - next_page_token='def', - ), - index_service.ListIndexesResponse( - indexes=[ - index.Index(), - ], - next_page_token='ghi', - ), - index_service.ListIndexesResponse( - indexes=[ - index.Index(), - index.Index(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_indexes(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_index(transport: str = 'grpc', request_type=index_service.UpdateIndexRequest): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.update_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.UpdateIndexRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_update_index_from_dict(): - test_update_index(request_type=dict) - - -def test_update_index_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index), - '__call__') as call: - client.update_index() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.UpdateIndexRequest() - - -@pytest.mark.asyncio -async def test_update_index_async(transport: str = 'grpc_asyncio', request_type=index_service.UpdateIndexRequest): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.update_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.UpdateIndexRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_update_index_async_from_dict(): - await test_update_index_async(request_type=dict) - - -def test_update_index_field_headers(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_service.UpdateIndexRequest() - - request.index.name = 'index.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.update_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'index.name=index.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_index_field_headers_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_service.UpdateIndexRequest() - - request.index.name = 'index.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.update_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'index.name=index.name/value', - ) in kw['metadata'] - - -def test_update_index_flattened(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_index( - index=gca_index.Index(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].index - mock_val = gca_index.Index(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_index_flattened_error(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_index( - index_service.UpdateIndexRequest(), - index=gca_index.Index(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_index_flattened_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_index( - index=gca_index.Index(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].index - mock_val = gca_index.Index(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_index_flattened_error_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_index( - index_service.UpdateIndexRequest(), - index=gca_index.Index(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_index(transport: str = 'grpc', request_type=index_service.DeleteIndexRequest): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.DeleteIndexRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_index_from_dict(): - test_delete_index(request_type=dict) - - -def test_delete_index_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index), - '__call__') as call: - client.delete_index() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.DeleteIndexRequest() - - -@pytest.mark.asyncio -async def test_delete_index_async(transport: str = 'grpc_asyncio', request_type=index_service.DeleteIndexRequest): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_index(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.DeleteIndexRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_index_async_from_dict(): - await test_delete_index_async(request_type=dict) - - -def test_delete_index_field_headers(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_service.DeleteIndexRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_index_field_headers_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_service.DeleteIndexRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_index), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_index_flattened(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_index( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_index_flattened_error(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_index( - index_service.DeleteIndexRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_index_flattened_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_index( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_index_flattened_error_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_index( - index_service.DeleteIndexRequest(), - name='name_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.IndexServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.IndexServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = IndexServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. 
- transport = transports.IndexServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = IndexServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.IndexServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = IndexServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.IndexServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.IndexServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.IndexServiceGrpcTransport, - transports.IndexServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. 
- client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.IndexServiceGrpcTransport, - ) - -def test_index_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.IndexServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_index_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1.services.index_service.transports.IndexServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.IndexServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'create_index', - 'get_index', - 'list_indexes', - 'update_index', - 'delete_index', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -def test_index_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.index_service.transports.IndexServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.IndexServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - 
load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_index_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.index_service.transports.IndexServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.IndexServiceTransport() - adc.assert_called_once() - - -def test_index_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - IndexServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.IndexServiceGrpcTransport, - transports.IndexServiceGrpcAsyncIOTransport, - ], -) -def test_index_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.IndexServiceGrpcTransport, grpc_helpers), - (transports.IndexServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_index_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.IndexServiceGrpcTransport, transports.IndexServiceGrpcAsyncIOTransport]) -def test_index_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
- with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_index_service_host_no_port(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_index_service_host_with_port(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_index_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.IndexServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_index_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.IndexServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.IndexServiceGrpcTransport, transports.IndexServiceGrpcAsyncIOTransport]) -def test_index_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, 
- credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.IndexServiceGrpcTransport, transports.IndexServiceGrpcAsyncIOTransport]) -def test_index_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_index_service_grpc_lro_client(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_index_service_grpc_lro_async_client(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_index_path(): - project = "squid" - location = "clam" - index = "whelk" - expected = "projects/{project}/locations/{location}/indexes/{index}".format(project=project, location=location, index=index, ) - actual = IndexServiceClient.index_path(project, location, index) - assert expected == actual - - -def test_parse_index_path(): - expected = { - "project": "octopus", - "location": "oyster", - "index": "nudibranch", - } - path = IndexServiceClient.index_path(**expected) - - # Check that the path construction is reversible. 
- actual = IndexServiceClient.parse_index_path(path) - assert expected == actual - -def test_index_endpoint_path(): - project = "cuttlefish" - location = "mussel" - index_endpoint = "winkle" - expected = "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format(project=project, location=location, index_endpoint=index_endpoint, ) - actual = IndexServiceClient.index_endpoint_path(project, location, index_endpoint) - assert expected == actual - - -def test_parse_index_endpoint_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "index_endpoint": "abalone", - } - path = IndexServiceClient.index_endpoint_path(**expected) - - # Check that the path construction is reversible. - actual = IndexServiceClient.parse_index_endpoint_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "squid" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = IndexServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "clam", - } - path = IndexServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = IndexServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "whelk" - expected = "folders/{folder}".format(folder=folder, ) - actual = IndexServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "octopus", - } - path = IndexServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. 
- actual = IndexServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "oyster" - expected = "organizations/{organization}".format(organization=organization, ) - actual = IndexServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nudibranch", - } - path = IndexServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = IndexServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "cuttlefish" - expected = "projects/{project}".format(project=project, ) - actual = IndexServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "mussel", - } - path = IndexServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = IndexServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "winkle" - location = "nautilus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = IndexServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "scallop", - "location": "abalone", - } - path = IndexServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = IndexServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.IndexServiceTransport, '_prep_wrapped_messages') as prep: - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.IndexServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = IndexServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_job_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_job_service.py deleted file mode 100644 index 6039ffeb80..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_job_service.py +++ /dev/null @@ -1,9153 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1.services.job_service import JobServiceAsyncClient -from google.cloud.aiplatform_v1.services.job_service import JobServiceClient -from google.cloud.aiplatform_v1.services.job_service import pagers -from google.cloud.aiplatform_v1.services.job_service import transports -from google.cloud.aiplatform_v1.types import accelerator_type -from google.cloud.aiplatform_v1.types import batch_prediction_job -from google.cloud.aiplatform_v1.types import batch_prediction_job as gca_batch_prediction_job -from google.cloud.aiplatform_v1.types import completion_stats -from google.cloud.aiplatform_v1.types import custom_job -from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1.types import data_labeling_job -from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import env_var -from google.cloud.aiplatform_v1.types import explanation -from google.cloud.aiplatform_v1.types import explanation_metadata -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job -from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job -from 
google.cloud.aiplatform_v1.types import io -from google.cloud.aiplatform_v1.types import job_service -from google.cloud.aiplatform_v1.types import job_state -from google.cloud.aiplatform_v1.types import machine_resources -from google.cloud.aiplatform_v1.types import manual_batch_tuning_parameters -from google.cloud.aiplatform_v1.types import model -from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job -from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job -from google.cloud.aiplatform_v1.types import model_monitoring -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.cloud.aiplatform_v1.types import study -from google.cloud.aiplatform_v1.types import unmanaged_container_model -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import any_pb2 # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from google.type import money_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
def modify_default_endpoint(client):
    """Return a non-localhost default endpoint for *client*.

    A localhost default endpoint would make the derived mTLS endpoint
    identical to it, so substitute a fake googleapis host to keep the
    endpoint-switching tests meaningful.
    """
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT


def test__get_default_mtls_endpoint():
    # Derivation of the mTLS endpoint from a regular API endpoint:
    # None and non-googleapis hosts pass through unchanged, everything
    # else gains (or keeps) the ".mtls." label.
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"

    assert JobServiceClient._get_default_mtls_endpoint(None) is None
    assert JobServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
    assert JobServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
    assert JobServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
    assert JobServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
    assert JobServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi


@pytest.mark.parametrize("client_class", [
    JobServiceClient,
    JobServiceAsyncClient,
])
def test_job_service_client_from_service_account_info(client_class):
    # from_service_account_info() must build a client around the credentials
    # minted by the service_account factory.
    anon_creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
        factory.return_value = anon_creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == anon_creds
        assert isinstance(client, client_class)

        assert client.transport._host == 'aiplatform.googleapis.com:443'


@pytest.mark.parametrize("transport_class,transport_name", [
    (transports.JobServiceGrpcTransport, "grpc"),
    (transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_job_service_client_service_account_always_use_jwt(transport_class, transport_name):
    # Opting in to self-signed JWTs must toggle the credentials accordingly...
    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
        sa_creds = service_account.Credentials(None, None, None)
        transport_class(credentials=sa_creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)

    # ...and opting out must leave them untouched.
    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
        sa_creds = service_account.Credentials(None, None, None)
        transport_class(credentials=sa_creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()


@pytest.mark.parametrize("client_class", [
    JobServiceClient,
    JobServiceAsyncClient,
])
def test_job_service_client_from_service_account_file(client_class):
    anon_creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
        factory.return_value = anon_creds
        # Both the canonical and the legacy (json) constructor spellings
        # must yield an equivalent client.
        for ctor in (client_class.from_service_account_file,
                     client_class.from_service_account_json):
            client = ctor("dummy/file/path.json")
            assert client.transport._credentials == anon_creds
            assert isinstance(client, client_class)

        assert client.transport._host == 'aiplatform.googleapis.com:443'


def test_job_service_client_get_transport_class():
    # No argument selects the default (gRPC) transport...
    selected = JobServiceClient.get_transport_class()
    assert selected in [transports.JobServiceGrpcTransport]

    # ...and asking for "grpc" explicitly yields the same class.
    selected = JobServiceClient.get_transport_class("grpc")
    assert selected == transports.JobServiceGrpcTransport


@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (JobServiceClient, transports.JobServiceGrpcTransport, "grpc"),
    (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
@mock.patch.object(JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient))
@mock.patch.object(JobServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceAsyncClient))
def test_job_service_client_client_options(client_class, transport_class, transport_name):
    """Exercise the client_options handling paths of the client constructor."""

    def check_transport_kwargs(patched, host, quota_project_id=None):
        # Every constructor path below forwards exactly these kwargs to the
        # transport; only host / quota_project_id vary per scenario.
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=host,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=quota_project_id,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

    # A ready-made transport instance is used as-is: no new transport built.
    with mock.patch.object(JobServiceClient, 'get_transport_class') as gtc:
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials()
        )
        client = client_class(transport=transport)
        gtc.assert_not_called()

    # A transport *name* forces construction of a fresh transport.
    with mock.patch.object(JobServiceClient, 'get_transport_class') as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()

    # An explicit api_endpoint wins over every default.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        check_transport_kwargs(patched, "squid.clam.whelk")

    # GOOGLE_API_USE_MTLS_ENDPOINT="never" -> plain endpoint.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            check_transport_kwargs(patched, client.DEFAULT_ENDPOINT)

    # GOOGLE_API_USE_MTLS_ENDPOINT="always" -> mTLS endpoint.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            check_transport_kwargs(patched, client.DEFAULT_MTLS_ENDPOINT)

    # Bogus values for either environment variable are rejected outright.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class()

    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
        with pytest.raises(ValueError):
            client = client_class()

    # quota_project_id is forwarded verbatim.
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        check_transport_kwargs(patched, client.DEFAULT_ENDPOINT, quota_project_id="octopus")
@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
    (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", "true"),
    (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"),
    (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", "false"),
    (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"),
])
@mock.patch.object(JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient))
@mock.patch.object(JobServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceAsyncClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_job_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
    """Endpoint autoswitch: with MTLS_ENDPOINT=auto the client moves to the
    mTLS endpoint iff GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and a
    client certificate is available."""

    def check_transport_kwargs(patched, host, cert_source):
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=host,
            scopes=None,
            client_cert_source_for_mtls=cert_source,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

    # Case 1: cert source supplied explicitly via client_options.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class(transport=transport_name, client_options=options)

            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT

            check_transport_kwargs(patched, expected_host, expected_client_cert_source)

    # Case 2: no explicit cert source, but ADC provides one.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch.object(transport_class, '__init__') as patched:
            with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
                with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
                    # NOTE(review): `client` here is still the instance left
                    # over from case 1 — DEFAULT_ENDPOINT is a class-level
                    # attribute, so the lookup is equivalent; order preserved
                    # from the generated original.
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback

                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    check_transport_kwargs(patched, expected_host, expected_client_cert_source)

    # Case 3: no cert source anywhere -> plain endpoint, no cert.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch.object(transport_class, '__init__') as patched:
            with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
                patched.return_value = None
                client = client_class(transport=transport_name)
                check_transport_kwargs(patched, client.DEFAULT_ENDPOINT, None)


@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (JobServiceClient, transports.JobServiceGrpcTransport, "grpc"),
    (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_job_service_client_client_options_scopes(client_class, transport_class, transport_name):
    # User-supplied OAuth scopes are forwarded to the transport verbatim.
    options = client_options.ClientOptions(
        scopes=["1", "2"],
    )
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )


@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (JobServiceClient, transports.JobServiceGrpcTransport, "grpc"),
    (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_job_service_client_client_options_credentials_file(client_class, transport_class, transport_name):
    # A credentials file path is forwarded to the transport verbatim.
    options = client_options.ClientOptions(
        credentials_file="credentials.json"
    )
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )


def test_job_service_client_client_options_from_dict():
    # client_options may be a plain dict rather than a ClientOptions object.
    with mock.patch('google.cloud.aiplatform_v1.services.job_service.transports.JobServiceGrpcTransport.__init__') as grpc_transport:
        grpc_transport.return_value = None
        client = JobServiceClient(
            client_options={'api_endpoint': 'squid.clam.whelk'}
        )
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )


def test_create_custom_job(transport: str = 'grpc', request_type=job_service.CreateCustomJobRequest):
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Proto3 fields are all optional and the API is mocked out, so an
    # empty request is sufficient.
    request = request_type()

    with mock.patch.object(type(client.transport.create_custom_job), '__call__') as rpc:
        rpc.return_value = gca_custom_job.CustomJob(
            name='name_value',
            display_name='display_name_value',
            state=job_state.JobState.JOB_STATE_QUEUED,
        )
        response = client.create_custom_job(request)

        # The stub saw exactly one call carrying the request we sent.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == job_service.CreateCustomJobRequest()

    # The surfaced response round-trips the designated values.
    assert isinstance(response, gca_custom_job.CustomJob)
    assert response.name == 'name_value'
    assert response.display_name == 'display_name_value'
    assert response.state == job_state.JobState.JOB_STATE_QUEUED
def test_create_custom_job_from_dict():
    # The request may also be supplied as a plain dict.
    test_create_custom_job(request_type=dict)


def test_create_custom_job_empty_call():
    # Coverage failsafe: a call with no request and no flattened fields
    # must still go out as a default-constructed request.
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )

    with mock.patch.object(type(client.transport.create_custom_job), '__call__') as rpc:
        client.create_custom_job()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == job_service.CreateCustomJobRequest()


@pytest.mark.asyncio
async def test_create_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateCustomJobRequest):
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Proto3 fields are all optional; the API is mocked out.
    request = request_type()

    with mock.patch.object(type(client.transport.create_custom_job), '__call__') as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_custom_job.CustomJob(
            name='name_value',
            display_name='display_name_value',
            state=job_state.JobState.JOB_STATE_QUEUED,
        ))
        response = await client.create_custom_job(request)

        # The stub was invoked with the request we sent.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == job_service.CreateCustomJobRequest()

    # The awaited response carries the designated values.
    assert isinstance(response, gca_custom_job.CustomJob)
    assert response.name == 'name_value'
    assert response.display_name == 'display_name_value'
    assert response.state == job_state.JobState.JOB_STATE_QUEUED


@pytest.mark.asyncio
async def test_create_custom_job_async_from_dict():
    await test_create_custom_job_async(request_type=dict)


def test_create_custom_job_field_headers():
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any request field that is part of the HTTP/1.1 URI must be mirrored
    # into the x-goog-request-params metadata header.
    request = job_service.CreateCustomJobRequest()

    request.parent = 'parent/value'

    with mock.patch.object(type(client.transport.create_custom_job), '__call__') as rpc:
        rpc.return_value = gca_custom_job.CustomJob()
        client.create_custom_job(request)

        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request

        _, _, call_kwargs = rpc.mock_calls[0]
        assert (
            'x-goog-request-params',
            'parent=parent/value',
        ) in call_kwargs['metadata']
@pytest.mark.asyncio
async def test_create_custom_job_field_headers_async():
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # URI-relevant request fields must be mirrored into the
    # x-goog-request-params metadata header (async variant).
    request = job_service.CreateCustomJobRequest()

    request.parent = 'parent/value'

    with mock.patch.object(type(client.transport.create_custom_job), '__call__') as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_custom_job.CustomJob())
        await client.create_custom_job(request)

        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request

        _, _, call_kwargs = rpc.mock_calls[0]
        assert (
            'x-goog-request-params',
            'parent=parent/value',
        ) in call_kwargs['metadata']


def test_create_custom_job_flattened():
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with mock.patch.object(type(client.transport.create_custom_job), '__call__') as rpc:
        rpc.return_value = gca_custom_job.CustomJob()
        # Flattened (keyword) arguments must be packed into the request proto.
        client.create_custom_job(
            parent='parent_value',
            custom_job=gca_custom_job.CustomJob(name='name_value'),
        )

        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0].parent == 'parent_value'
        assert call_args[0].custom_job == gca_custom_job.CustomJob(name='name_value')


def test_create_custom_job_flattened_error():
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mixing a request object with flattened fields is rejected.
    with pytest.raises(ValueError):
        client.create_custom_job(
            job_service.CreateCustomJobRequest(),
            parent='parent_value',
            custom_job=gca_custom_job.CustomJob(name='name_value'),
        )


@pytest.mark.asyncio
async def test_create_custom_job_flattened_async():
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with mock.patch.object(type(client.transport.create_custom_job), '__call__') as rpc:
        # Fix: the generated original assigned a bare CustomJob() to the
        # mock's return_value and immediately overwrote it with the
        # awaitable wrapper below; the dead first assignment is removed.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_custom_job.CustomJob())
        # Flattened (keyword) arguments must be packed into the request proto.
        response = await client.create_custom_job(
            parent='parent_value',
            custom_job=gca_custom_job.CustomJob(name='name_value'),
        )

        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0].parent == 'parent_value'
        assert call_args[0].custom_job == gca_custom_job.CustomJob(name='name_value')
@pytest.mark.asyncio
async def test_create_custom_job_flattened_error_async():
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mixing a request object with flattened fields is rejected (async).
    with pytest.raises(ValueError):
        await client.create_custom_job(
            job_service.CreateCustomJobRequest(),
            parent='parent_value',
            custom_job=gca_custom_job.CustomJob(name='name_value'),
        )


def test_get_custom_job(transport: str = 'grpc', request_type=job_service.GetCustomJobRequest):
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Proto3 fields are all optional and the API is mocked out, so an
    # empty request is sufficient.
    request = request_type()

    with mock.patch.object(type(client.transport.get_custom_job), '__call__') as rpc:
        rpc.return_value = custom_job.CustomJob(
            name='name_value',
            display_name='display_name_value',
            state=job_state.JobState.JOB_STATE_QUEUED,
        )
        response = client.get_custom_job(request)

        # The stub saw exactly one call carrying the request we sent.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == job_service.GetCustomJobRequest()

    # The surfaced response round-trips the designated values.
    assert isinstance(response, custom_job.CustomJob)
    assert response.name == 'name_value'
    assert response.display_name == 'display_name_value'
    assert response.state == job_state.JobState.JOB_STATE_QUEUED


def test_get_custom_job_from_dict():
    # The request may also be supplied as a plain dict.
    test_get_custom_job(request_type=dict)


def test_get_custom_job_empty_call():
    # Coverage failsafe: a call with no request and no flattened fields
    # must still go out as a default-constructed request.
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )

    with mock.patch.object(type(client.transport.get_custom_job), '__call__') as rpc:
        client.get_custom_job()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == job_service.GetCustomJobRequest()


@pytest.mark.asyncio
async def test_get_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetCustomJobRequest):
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Proto3 fields are all optional; the API is mocked out.
    request = request_type()

    with mock.patch.object(type(client.transport.get_custom_job), '__call__') as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(custom_job.CustomJob(
            name='name_value',
            display_name='display_name_value',
            state=job_state.JobState.JOB_STATE_QUEUED,
        ))
        response = await client.get_custom_job(request)

        # The stub was invoked with the request we sent.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == job_service.GetCustomJobRequest()

    # The awaited response carries the designated values.
    assert isinstance(response, custom_job.CustomJob)
    assert response.name == 'name_value'
    assert response.display_name == 'display_name_value'
    assert response.state == job_state.JobState.JOB_STATE_QUEUED
@pytest.mark.asyncio
async def test_get_custom_job_async_from_dict():
    await test_get_custom_job_async(request_type=dict)


def test_get_custom_job_field_headers():
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # URI-relevant request fields must be mirrored into the
    # x-goog-request-params metadata header.
    request = job_service.GetCustomJobRequest()

    request.name = 'name/value'

    with mock.patch.object(type(client.transport.get_custom_job), '__call__') as rpc:
        rpc.return_value = custom_job.CustomJob()
        client.get_custom_job(request)

        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request

        _, _, call_kwargs = rpc.mock_calls[0]
        assert (
            'x-goog-request-params',
            'name=name/value',
        ) in call_kwargs['metadata']


@pytest.mark.asyncio
async def test_get_custom_job_field_headers_async():
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # URI-relevant request fields must be mirrored into the
    # x-goog-request-params metadata header (async variant).
    request = job_service.GetCustomJobRequest()

    request.name = 'name/value'

    with mock.patch.object(type(client.transport.get_custom_job), '__call__') as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(custom_job.CustomJob())
        await client.get_custom_job(request)

        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request

        _, _, call_kwargs = rpc.mock_calls[0]
        assert (
            'x-goog-request-params',
            'name=name/value',
        ) in call_kwargs['metadata']


def test_get_custom_job_flattened():
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with mock.patch.object(type(client.transport.get_custom_job), '__call__') as rpc:
        rpc.return_value = custom_job.CustomJob()
        # Flattened (keyword) arguments must be packed into the request proto.
        client.get_custom_job(
            name='name_value',
        )

        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0].name == 'name_value'


def test_get_custom_job_flattened_error():
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mixing a request object with flattened fields is rejected.
    with pytest.raises(ValueError):
        client.get_custom_job(
            job_service.GetCustomJobRequest(),
            name='name_value',
        )


@pytest.mark.asyncio
async def test_get_custom_job_flattened_async():
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    with mock.patch.object(type(client.transport.get_custom_job), '__call__') as rpc:
        # Fix: the generated original assigned a bare CustomJob() to the
        # mock's return_value and immediately overwrote it with the
        # awaitable wrapper below; the dead first assignment is removed.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(custom_job.CustomJob())
        # Flattened (keyword) arguments must be packed into the request proto.
        response = await client.get_custom_job(
            name='name_value',
        )

        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_get_custom_job_flattened_error_async():
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mixing a request object with flattened fields is rejected (async).
    with pytest.raises(ValueError):
        await client.get_custom_job(
            job_service.GetCustomJobRequest(),
            name='name_value',
        )


def test_list_custom_jobs(transport: str = 'grpc', request_type=job_service.ListCustomJobsRequest):
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Proto3 fields are all optional and the API is mocked out, so an
    # empty request is sufficient.
    request = request_type()

    with mock.patch.object(type(client.transport.list_custom_jobs), '__call__') as rpc:
        rpc.return_value = job_service.ListCustomJobsResponse(
            next_page_token='next_page_token_value',
        )
        response = client.list_custom_jobs(request)

        # The stub saw exactly one call carrying the request we sent.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == job_service.ListCustomJobsRequest()

    # The raw response is wrapped in a pager.
    assert isinstance(response, pagers.ListCustomJobsPager)
    assert response.next_page_token == 'next_page_token_value'


def test_list_custom_jobs_from_dict():
    # The request may also be supplied as a plain dict.
    test_list_custom_jobs(request_type=dict)


def test_list_custom_jobs_empty_call():
    # Coverage failsafe: a call with no request and no flattened fields
    # must still go out as a default-constructed request.
    client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )

    with mock.patch.object(type(client.transport.list_custom_jobs), '__call__') as rpc:
        client.list_custom_jobs()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == job_service.ListCustomJobsRequest()


@pytest.mark.asyncio
async def test_list_custom_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListCustomJobsRequest):
    client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Proto3 fields are all optional; the API is mocked out.
    request = request_type()

    with mock.patch.object(type(client.transport.list_custom_jobs), '__call__') as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListCustomJobsResponse(
            next_page_token='next_page_token_value',
        ))
        response = await client.list_custom_jobs(request)

        # The stub was invoked with the request we sent.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == job_service.ListCustomJobsRequest()

    # The raw response is wrapped in an async pager.
    assert isinstance(response, pagers.ListCustomJobsAsyncPager)
    assert response.next_page_token == 'next_page_token_value'
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListCustomJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListCustomJobsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_custom_jobs_async_from_dict(): - await test_list_custom_jobs_async(request_type=dict) - - -def test_list_custom_jobs_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.ListCustomJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__') as call: - call.return_value = job_service.ListCustomJobsResponse() - client.list_custom_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_custom_jobs_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.ListCustomJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListCustomJobsResponse()) - await client.list_custom_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_custom_jobs_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListCustomJobsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_custom_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_custom_jobs_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_custom_jobs( - job_service.ListCustomJobsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_custom_jobs_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListCustomJobsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListCustomJobsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_custom_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_custom_jobs_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_custom_jobs( - job_service.ListCustomJobsRequest(), - parent='parent_value', - ) - - -def test_list_custom_jobs_pager(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - custom_job.CustomJob(), - custom_job.CustomJob(), - ], - next_page_token='abc', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[], - next_page_token='def', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - ], - next_page_token='ghi', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - custom_job.CustomJob(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_custom_jobs(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, custom_job.CustomJob) - for i in results) - -def test_list_custom_jobs_pages(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - custom_job.CustomJob(), - custom_job.CustomJob(), - ], - next_page_token='abc', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[], - next_page_token='def', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - ], - next_page_token='ghi', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - custom_job.CustomJob(), - ], - ), - RuntimeError, - ) - pages = list(client.list_custom_jobs(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_custom_jobs_async_pager(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - custom_job.CustomJob(), - custom_job.CustomJob(), - ], - next_page_token='abc', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[], - next_page_token='def', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - ], - next_page_token='ghi', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - custom_job.CustomJob(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_custom_jobs(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, custom_job.CustomJob) - for i in responses) - -@pytest.mark.asyncio -async def test_list_custom_jobs_async_pages(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - custom_job.CustomJob(), - custom_job.CustomJob(), - ], - next_page_token='abc', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[], - next_page_token='def', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - ], - next_page_token='ghi', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - custom_job.CustomJob(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_custom_jobs(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_custom_job(transport: str = 'grpc', request_type=job_service.DeleteCustomJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteCustomJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_custom_job_from_dict(): - test_delete_custom_job(request_type=dict) - - -def test_delete_custom_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_custom_job), - '__call__') as call: - client.delete_custom_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteCustomJobRequest() - - -@pytest.mark.asyncio -async def test_delete_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteCustomJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteCustomJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_custom_job_async_from_dict(): - await test_delete_custom_job_async(request_type=dict) - - -def test_delete_custom_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = job_service.DeleteCustomJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_custom_job), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_custom_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteCustomJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_custom_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_custom_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_custom_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_custom_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_custom_job( - job_service.DeleteCustomJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_custom_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_custom_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_custom_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_custom_job( - job_service.DeleteCustomJobRequest(), - name='name_value', - ) - - -def test_cancel_custom_job(transport: str = 'grpc', request_type=job_service.CancelCustomJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.cancel_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelCustomJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_cancel_custom_job_from_dict(): - test_cancel_custom_job(request_type=dict) - - -def test_cancel_custom_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.cancel_custom_job), - '__call__') as call: - client.cancel_custom_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelCustomJobRequest() - - -@pytest.mark.asyncio -async def test_cancel_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelCustomJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.cancel_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelCustomJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_cancel_custom_job_async_from_dict(): - await test_cancel_custom_job_async(request_type=dict) - - -def test_cancel_custom_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CancelCustomJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.cancel_custom_job), - '__call__') as call: - call.return_value = None - client.cancel_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_cancel_custom_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CancelCustomJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_custom_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.cancel_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_cancel_custom_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.cancel_custom_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_cancel_custom_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.cancel_custom_job( - job_service.CancelCustomJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_cancel_custom_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.cancel_custom_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_cancel_custom_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.cancel_custom_job( - job_service.CancelCustomJobRequest(), - name='name_value', - ) - - -def test_create_data_labeling_job(transport: str = 'grpc', request_type=job_service.CreateDataLabelingJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_data_labeling_job.DataLabelingJob( - name='name_value', - display_name='display_name_value', - datasets=['datasets_value'], - labeler_count=1375, - instruction_uri='instruction_uri_value', - inputs_schema_uri='inputs_schema_uri_value', - state=job_state.JobState.JOB_STATE_QUEUED, - labeling_progress=1810, - specialist_pools=['specialist_pools_value'], - ) - response = client.create_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateDataLabelingJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_data_labeling_job.DataLabelingJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.datasets == ['datasets_value'] - assert response.labeler_count == 1375 - assert response.instruction_uri == 'instruction_uri_value' - assert response.inputs_schema_uri == 'inputs_schema_uri_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - assert response.labeling_progress == 1810 - assert response.specialist_pools == ['specialist_pools_value'] - - -def test_create_data_labeling_job_from_dict(): - test_create_data_labeling_job(request_type=dict) - - -def test_create_data_labeling_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_data_labeling_job), - '__call__') as call: - client.create_data_labeling_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateDataLabelingJobRequest() - - -@pytest.mark.asyncio -async def test_create_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateDataLabelingJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_data_labeling_job.DataLabelingJob( - name='name_value', - display_name='display_name_value', - datasets=['datasets_value'], - labeler_count=1375, - instruction_uri='instruction_uri_value', - inputs_schema_uri='inputs_schema_uri_value', - state=job_state.JobState.JOB_STATE_QUEUED, - labeling_progress=1810, - specialist_pools=['specialist_pools_value'], - )) - response = await client.create_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateDataLabelingJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_data_labeling_job.DataLabelingJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.datasets == ['datasets_value'] - assert response.labeler_count == 1375 - assert response.instruction_uri == 'instruction_uri_value' - assert response.inputs_schema_uri == 'inputs_schema_uri_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - assert response.labeling_progress == 1810 - assert response.specialist_pools == ['specialist_pools_value'] - - -@pytest.mark.asyncio -async def test_create_data_labeling_job_async_from_dict(): - await test_create_data_labeling_job_async(request_type=dict) - - -def test_create_data_labeling_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CreateDataLabelingJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_data_labeling_job), - '__call__') as call: - call.return_value = gca_data_labeling_job.DataLabelingJob() - client.create_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_data_labeling_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CreateDataLabelingJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_data_labeling_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_data_labeling_job.DataLabelingJob()) - await client.create_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_data_labeling_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = gca_data_labeling_job.DataLabelingJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_data_labeling_job( - parent='parent_value', - data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].data_labeling_job - mock_val = gca_data_labeling_job.DataLabelingJob(name='name_value') - assert arg == mock_val - - -def test_create_data_labeling_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_data_labeling_job( - job_service.CreateDataLabelingJobRequest(), - parent='parent_value', - data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_data_labeling_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_data_labeling_job.DataLabelingJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_data_labeling_job.DataLabelingJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.create_data_labeling_job( - parent='parent_value', - data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].data_labeling_job - mock_val = gca_data_labeling_job.DataLabelingJob(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_data_labeling_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_data_labeling_job( - job_service.CreateDataLabelingJobRequest(), - parent='parent_value', - data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'), - ) - - -def test_get_data_labeling_job(transport: str = 'grpc', request_type=job_service.GetDataLabelingJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = data_labeling_job.DataLabelingJob( - name='name_value', - display_name='display_name_value', - datasets=['datasets_value'], - labeler_count=1375, - instruction_uri='instruction_uri_value', - inputs_schema_uri='inputs_schema_uri_value', - state=job_state.JobState.JOB_STATE_QUEUED, - labeling_progress=1810, - specialist_pools=['specialist_pools_value'], - ) - response = client.get_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetDataLabelingJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, data_labeling_job.DataLabelingJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.datasets == ['datasets_value'] - assert response.labeler_count == 1375 - assert response.instruction_uri == 'instruction_uri_value' - assert response.inputs_schema_uri == 'inputs_schema_uri_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - assert response.labeling_progress == 1810 - assert response.specialist_pools == ['specialist_pools_value'] - - -def test_get_data_labeling_job_from_dict(): - test_get_data_labeling_job(request_type=dict) - - -def test_get_data_labeling_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_data_labeling_job), - '__call__') as call: - client.get_data_labeling_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetDataLabelingJobRequest() - - -@pytest.mark.asyncio -async def test_get_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetDataLabelingJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(data_labeling_job.DataLabelingJob( - name='name_value', - display_name='display_name_value', - datasets=['datasets_value'], - labeler_count=1375, - instruction_uri='instruction_uri_value', - inputs_schema_uri='inputs_schema_uri_value', - state=job_state.JobState.JOB_STATE_QUEUED, - labeling_progress=1810, - specialist_pools=['specialist_pools_value'], - )) - response = await client.get_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetDataLabelingJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, data_labeling_job.DataLabelingJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.datasets == ['datasets_value'] - assert response.labeler_count == 1375 - assert response.instruction_uri == 'instruction_uri_value' - assert response.inputs_schema_uri == 'inputs_schema_uri_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - assert response.labeling_progress == 1810 - assert response.specialist_pools == ['specialist_pools_value'] - - -@pytest.mark.asyncio -async def test_get_data_labeling_job_async_from_dict(): - await test_get_data_labeling_job_async(request_type=dict) - - -def test_get_data_labeling_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetDataLabelingJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_data_labeling_job), - '__call__') as call: - call.return_value = data_labeling_job.DataLabelingJob() - client.get_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_data_labeling_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = job_service.GetDataLabelingJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_data_labeling_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(data_labeling_job.DataLabelingJob()) - await client.get_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_data_labeling_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = data_labeling_job.DataLabelingJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_data_labeling_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_data_labeling_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_data_labeling_job( - job_service.GetDataLabelingJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_data_labeling_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = data_labeling_job.DataLabelingJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(data_labeling_job.DataLabelingJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_data_labeling_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_data_labeling_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_data_labeling_job( - job_service.GetDataLabelingJobRequest(), - name='name_value', - ) - - -def test_list_data_labeling_jobs(transport: str = 'grpc', request_type=job_service.ListDataLabelingJobsRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_data_labeling_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListDataLabelingJobsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_data_labeling_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListDataLabelingJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListDataLabelingJobsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_data_labeling_jobs_from_dict(): - test_list_data_labeling_jobs(request_type=dict) - - -def test_list_data_labeling_jobs_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_labeling_jobs), - '__call__') as call: - client.list_data_labeling_jobs() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListDataLabelingJobsRequest() - - -@pytest.mark.asyncio -async def test_list_data_labeling_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListDataLabelingJobsRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_data_labeling_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListDataLabelingJobsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_data_labeling_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListDataLabelingJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListDataLabelingJobsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_data_labeling_jobs_async_from_dict(): - await test_list_data_labeling_jobs_async(request_type=dict) - - -def test_list_data_labeling_jobs_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.ListDataLabelingJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_labeling_jobs), - '__call__') as call: - call.return_value = job_service.ListDataLabelingJobsResponse() - client.list_data_labeling_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_data_labeling_jobs_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.ListDataLabelingJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_labeling_jobs), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListDataLabelingJobsResponse()) - await client.list_data_labeling_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_data_labeling_jobs_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_labeling_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListDataLabelingJobsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_data_labeling_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_data_labeling_jobs_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_data_labeling_jobs( - job_service.ListDataLabelingJobsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_data_labeling_jobs_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_labeling_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListDataLabelingJobsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListDataLabelingJobsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_data_labeling_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_data_labeling_jobs_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.list_data_labeling_jobs( - job_service.ListDataLabelingJobsRequest(), - parent='parent_value', - ) - - -def test_list_data_labeling_jobs_pager(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_labeling_jobs), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[ - data_labeling_job.DataLabelingJob(), - data_labeling_job.DataLabelingJob(), - data_labeling_job.DataLabelingJob(), - ], - next_page_token='abc', - ), - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[], - next_page_token='def', - ), - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[ - data_labeling_job.DataLabelingJob(), - ], - next_page_token='ghi', - ), - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[ - data_labeling_job.DataLabelingJob(), - data_labeling_job.DataLabelingJob(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_data_labeling_jobs(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, data_labeling_job.DataLabelingJob) - for i in results) - -def test_list_data_labeling_jobs_pages(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_labeling_jobs), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[ - data_labeling_job.DataLabelingJob(), - data_labeling_job.DataLabelingJob(), - data_labeling_job.DataLabelingJob(), - ], - next_page_token='abc', - ), - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[], - next_page_token='def', - ), - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[ - data_labeling_job.DataLabelingJob(), - ], - next_page_token='ghi', - ), - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[ - data_labeling_job.DataLabelingJob(), - data_labeling_job.DataLabelingJob(), - ], - ), - RuntimeError, - ) - pages = list(client.list_data_labeling_jobs(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_data_labeling_jobs_async_pager(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_labeling_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[ - data_labeling_job.DataLabelingJob(), - data_labeling_job.DataLabelingJob(), - data_labeling_job.DataLabelingJob(), - ], - next_page_token='abc', - ), - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[], - next_page_token='def', - ), - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[ - data_labeling_job.DataLabelingJob(), - ], - next_page_token='ghi', - ), - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[ - data_labeling_job.DataLabelingJob(), - data_labeling_job.DataLabelingJob(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_data_labeling_jobs(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, data_labeling_job.DataLabelingJob) - for i in responses) - -@pytest.mark.asyncio -async def test_list_data_labeling_jobs_async_pages(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_labeling_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[ - data_labeling_job.DataLabelingJob(), - data_labeling_job.DataLabelingJob(), - data_labeling_job.DataLabelingJob(), - ], - next_page_token='abc', - ), - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[], - next_page_token='def', - ), - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[ - data_labeling_job.DataLabelingJob(), - ], - next_page_token='ghi', - ), - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[ - data_labeling_job.DataLabelingJob(), - data_labeling_job.DataLabelingJob(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_data_labeling_jobs(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_data_labeling_job(transport: str = 'grpc', request_type=job_service.DeleteDataLabelingJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteDataLabelingJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_delete_data_labeling_job_from_dict(): - test_delete_data_labeling_job(request_type=dict) - - -def test_delete_data_labeling_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_data_labeling_job), - '__call__') as call: - client.delete_data_labeling_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteDataLabelingJobRequest() - - -@pytest.mark.asyncio -async def test_delete_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteDataLabelingJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteDataLabelingJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_data_labeling_job_async_from_dict(): - await test_delete_data_labeling_job_async(request_type=dict) - - -def test_delete_data_labeling_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteDataLabelingJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_data_labeling_job), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_data_labeling_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteDataLabelingJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_data_labeling_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_data_labeling_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_data_labeling_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_data_labeling_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_data_labeling_job( - job_service.DeleteDataLabelingJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_data_labeling_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_data_labeling_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_data_labeling_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_data_labeling_job( - job_service.DeleteDataLabelingJobRequest(), - name='name_value', - ) - - -def test_cancel_data_labeling_job(transport: str = 'grpc', request_type=job_service.CancelDataLabelingJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.cancel_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelDataLabelingJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_cancel_data_labeling_job_from_dict(): - test_cancel_data_labeling_job(request_type=dict) - - -def test_cancel_data_labeling_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_data_labeling_job), - '__call__') as call: - client.cancel_data_labeling_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelDataLabelingJobRequest() - - -@pytest.mark.asyncio -async def test_cancel_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelDataLabelingJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.cancel_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelDataLabelingJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_cancel_data_labeling_job_async_from_dict(): - await test_cancel_data_labeling_job_async(request_type=dict) - - -def test_cancel_data_labeling_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CancelDataLabelingJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_data_labeling_job), - '__call__') as call: - call.return_value = None - client.cancel_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_cancel_data_labeling_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CancelDataLabelingJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.cancel_data_labeling_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.cancel_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_cancel_data_labeling_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.cancel_data_labeling_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_cancel_data_labeling_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.cancel_data_labeling_job( - job_service.CancelDataLabelingJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_cancel_data_labeling_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.cancel_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.cancel_data_labeling_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_cancel_data_labeling_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.cancel_data_labeling_job( - job_service.CancelDataLabelingJobRequest(), - name='name_value', - ) - - -def test_create_hyperparameter_tuning_job(transport: str = 'grpc', request_type=job_service.CreateHyperparameterTuningJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob( - name='name_value', - display_name='display_name_value', - max_trial_count=1609, - parallel_trial_count=2128, - max_failed_trial_count=2317, - state=job_state.JobState.JOB_STATE_QUEUED, - ) - response = client.create_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateHyperparameterTuningJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_hyperparameter_tuning_job.HyperparameterTuningJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.max_trial_count == 1609 - assert response.parallel_trial_count == 2128 - assert response.max_failed_trial_count == 2317 - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -def test_create_hyperparameter_tuning_job_from_dict(): - test_create_hyperparameter_tuning_job(request_type=dict) - - -def test_create_hyperparameter_tuning_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_hyperparameter_tuning_job), - '__call__') as call: - client.create_hyperparameter_tuning_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateHyperparameterTuningJobRequest() - - -@pytest.mark.asyncio -async def test_create_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateHyperparameterTuningJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_hyperparameter_tuning_job.HyperparameterTuningJob( - name='name_value', - display_name='display_name_value', - max_trial_count=1609, - parallel_trial_count=2128, - max_failed_trial_count=2317, - state=job_state.JobState.JOB_STATE_QUEUED, - )) - response = await client.create_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateHyperparameterTuningJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_hyperparameter_tuning_job.HyperparameterTuningJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.max_trial_count == 1609 - assert response.parallel_trial_count == 2128 - assert response.max_failed_trial_count == 2317 - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -@pytest.mark.asyncio -async def test_create_hyperparameter_tuning_job_async_from_dict(): - await test_create_hyperparameter_tuning_job_async(request_type=dict) - - -def test_create_hyperparameter_tuning_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CreateHyperparameterTuningJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_hyperparameter_tuning_job), - '__call__') as call: - call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob() - client.create_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_hyperparameter_tuning_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = job_service.CreateHyperparameterTuningJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_hyperparameter_tuning_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_hyperparameter_tuning_job.HyperparameterTuningJob()) - await client.create_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_hyperparameter_tuning_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_hyperparameter_tuning_job( - parent='parent_value', - hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].hyperparameter_tuning_job - mock_val = gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value') - assert arg == mock_val - - -def test_create_hyperparameter_tuning_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_hyperparameter_tuning_job( - job_service.CreateHyperparameterTuningJobRequest(), - parent='parent_value', - hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_hyperparameter_tuning_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_hyperparameter_tuning_job.HyperparameterTuningJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_hyperparameter_tuning_job( - parent='parent_value', - hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].hyperparameter_tuning_job - mock_val = gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_hyperparameter_tuning_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_hyperparameter_tuning_job( - job_service.CreateHyperparameterTuningJobRequest(), - parent='parent_value', - hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'), - ) - - -def test_get_hyperparameter_tuning_job(transport: str = 'grpc', request_type=job_service.GetHyperparameterTuningJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob( - name='name_value', - display_name='display_name_value', - max_trial_count=1609, - parallel_trial_count=2128, - max_failed_trial_count=2317, - state=job_state.JobState.JOB_STATE_QUEUED, - ) - response = client.get_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetHyperparameterTuningJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, hyperparameter_tuning_job.HyperparameterTuningJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.max_trial_count == 1609 - assert response.parallel_trial_count == 2128 - assert response.max_failed_trial_count == 2317 - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -def test_get_hyperparameter_tuning_job_from_dict(): - test_get_hyperparameter_tuning_job(request_type=dict) - - -def test_get_hyperparameter_tuning_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_hyperparameter_tuning_job), - '__call__') as call: - client.get_hyperparameter_tuning_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetHyperparameterTuningJobRequest() - - -@pytest.mark.asyncio -async def test_get_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetHyperparameterTuningJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(hyperparameter_tuning_job.HyperparameterTuningJob( - name='name_value', - display_name='display_name_value', - max_trial_count=1609, - parallel_trial_count=2128, - max_failed_trial_count=2317, - state=job_state.JobState.JOB_STATE_QUEUED, - )) - response = await client.get_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetHyperparameterTuningJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, hyperparameter_tuning_job.HyperparameterTuningJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.max_trial_count == 1609 - assert response.parallel_trial_count == 2128 - assert response.max_failed_trial_count == 2317 - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -@pytest.mark.asyncio -async def test_get_hyperparameter_tuning_job_async_from_dict(): - await test_get_hyperparameter_tuning_job_async(request_type=dict) - - -def test_get_hyperparameter_tuning_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetHyperparameterTuningJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_hyperparameter_tuning_job), - '__call__') as call: - call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob() - client.get_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_hyperparameter_tuning_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetHyperparameterTuningJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_hyperparameter_tuning_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(hyperparameter_tuning_job.HyperparameterTuningJob()) - await client.get_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_hyperparameter_tuning_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_hyperparameter_tuning_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_hyperparameter_tuning_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_hyperparameter_tuning_job( - job_service.GetHyperparameterTuningJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_hyperparameter_tuning_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(hyperparameter_tuning_job.HyperparameterTuningJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_hyperparameter_tuning_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_hyperparameter_tuning_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_hyperparameter_tuning_job( - job_service.GetHyperparameterTuningJobRequest(), - name='name_value', - ) - - -def test_list_hyperparameter_tuning_jobs(transport: str = 'grpc', request_type=job_service.ListHyperparameterTuningJobsRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hyperparameter_tuning_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListHyperparameterTuningJobsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_hyperparameter_tuning_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListHyperparameterTuningJobsRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListHyperparameterTuningJobsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_hyperparameter_tuning_jobs_from_dict(): - test_list_hyperparameter_tuning_jobs(request_type=dict) - - -def test_list_hyperparameter_tuning_jobs_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hyperparameter_tuning_jobs), - '__call__') as call: - client.list_hyperparameter_tuning_jobs() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListHyperparameterTuningJobsRequest() - - -@pytest.mark.asyncio -async def test_list_hyperparameter_tuning_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListHyperparameterTuningJobsRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hyperparameter_tuning_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListHyperparameterTuningJobsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_hyperparameter_tuning_jobs(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListHyperparameterTuningJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListHyperparameterTuningJobsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_hyperparameter_tuning_jobs_async_from_dict(): - await test_list_hyperparameter_tuning_jobs_async(request_type=dict) - - -def test_list_hyperparameter_tuning_jobs_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.ListHyperparameterTuningJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hyperparameter_tuning_jobs), - '__call__') as call: - call.return_value = job_service.ListHyperparameterTuningJobsResponse() - client.list_hyperparameter_tuning_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_hyperparameter_tuning_jobs_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.ListHyperparameterTuningJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_hyperparameter_tuning_jobs), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListHyperparameterTuningJobsResponse()) - await client.list_hyperparameter_tuning_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_hyperparameter_tuning_jobs_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hyperparameter_tuning_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListHyperparameterTuningJobsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_hyperparameter_tuning_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_hyperparameter_tuning_jobs_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_hyperparameter_tuning_jobs( - job_service.ListHyperparameterTuningJobsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_hyperparameter_tuning_jobs_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hyperparameter_tuning_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListHyperparameterTuningJobsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListHyperparameterTuningJobsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_hyperparameter_tuning_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_hyperparameter_tuning_jobs_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_hyperparameter_tuning_jobs( - job_service.ListHyperparameterTuningJobsRequest(), - parent='parent_value', - ) - - -def test_list_hyperparameter_tuning_jobs_pager(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_hyperparameter_tuning_jobs), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[ - hyperparameter_tuning_job.HyperparameterTuningJob(), - hyperparameter_tuning_job.HyperparameterTuningJob(), - hyperparameter_tuning_job.HyperparameterTuningJob(), - ], - next_page_token='abc', - ), - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[], - next_page_token='def', - ), - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[ - hyperparameter_tuning_job.HyperparameterTuningJob(), - ], - next_page_token='ghi', - ), - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[ - hyperparameter_tuning_job.HyperparameterTuningJob(), - hyperparameter_tuning_job.HyperparameterTuningJob(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_hyperparameter_tuning_jobs(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, hyperparameter_tuning_job.HyperparameterTuningJob) - for i in results) - -def test_list_hyperparameter_tuning_jobs_pages(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hyperparameter_tuning_jobs), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[ - hyperparameter_tuning_job.HyperparameterTuningJob(), - hyperparameter_tuning_job.HyperparameterTuningJob(), - hyperparameter_tuning_job.HyperparameterTuningJob(), - ], - next_page_token='abc', - ), - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[], - next_page_token='def', - ), - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[ - hyperparameter_tuning_job.HyperparameterTuningJob(), - ], - next_page_token='ghi', - ), - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[ - hyperparameter_tuning_job.HyperparameterTuningJob(), - hyperparameter_tuning_job.HyperparameterTuningJob(), - ], - ), - RuntimeError, - ) - pages = list(client.list_hyperparameter_tuning_jobs(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_hyperparameter_tuning_jobs_async_pager(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hyperparameter_tuning_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[ - hyperparameter_tuning_job.HyperparameterTuningJob(), - hyperparameter_tuning_job.HyperparameterTuningJob(), - hyperparameter_tuning_job.HyperparameterTuningJob(), - ], - next_page_token='abc', - ), - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[], - next_page_token='def', - ), - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[ - hyperparameter_tuning_job.HyperparameterTuningJob(), - ], - next_page_token='ghi', - ), - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[ - hyperparameter_tuning_job.HyperparameterTuningJob(), - hyperparameter_tuning_job.HyperparameterTuningJob(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_hyperparameter_tuning_jobs(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, hyperparameter_tuning_job.HyperparameterTuningJob) - for i in responses) - -@pytest.mark.asyncio -async def test_list_hyperparameter_tuning_jobs_async_pages(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hyperparameter_tuning_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[ - hyperparameter_tuning_job.HyperparameterTuningJob(), - hyperparameter_tuning_job.HyperparameterTuningJob(), - hyperparameter_tuning_job.HyperparameterTuningJob(), - ], - next_page_token='abc', - ), - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[], - next_page_token='def', - ), - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[ - hyperparameter_tuning_job.HyperparameterTuningJob(), - ], - next_page_token='ghi', - ), - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[ - hyperparameter_tuning_job.HyperparameterTuningJob(), - hyperparameter_tuning_job.HyperparameterTuningJob(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_hyperparameter_tuning_jobs(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_hyperparameter_tuning_job(transport: str = 'grpc', request_type=job_service.DeleteHyperparameterTuningJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteHyperparameterTuningJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_hyperparameter_tuning_job_from_dict(): - test_delete_hyperparameter_tuning_job(request_type=dict) - - -def test_delete_hyperparameter_tuning_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_hyperparameter_tuning_job), - '__call__') as call: - client.delete_hyperparameter_tuning_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteHyperparameterTuningJobRequest() - - -@pytest.mark.asyncio -async def test_delete_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteHyperparameterTuningJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteHyperparameterTuningJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_hyperparameter_tuning_job_async_from_dict(): - await test_delete_hyperparameter_tuning_job_async(request_type=dict) - - -def test_delete_hyperparameter_tuning_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteHyperparameterTuningJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_hyperparameter_tuning_job), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_hyperparameter_tuning_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteHyperparameterTuningJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_hyperparameter_tuning_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_hyperparameter_tuning_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_hyperparameter_tuning_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_hyperparameter_tuning_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.delete_hyperparameter_tuning_job( - job_service.DeleteHyperparameterTuningJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_hyperparameter_tuning_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_hyperparameter_tuning_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_hyperparameter_tuning_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.delete_hyperparameter_tuning_job( - job_service.DeleteHyperparameterTuningJobRequest(), - name='name_value', - ) - - -def test_cancel_hyperparameter_tuning_job(transport: str = 'grpc', request_type=job_service.CancelHyperparameterTuningJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.cancel_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelHyperparameterTuningJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_cancel_hyperparameter_tuning_job_from_dict(): - test_cancel_hyperparameter_tuning_job(request_type=dict) - - -def test_cancel_hyperparameter_tuning_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.cancel_hyperparameter_tuning_job), - '__call__') as call: - client.cancel_hyperparameter_tuning_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelHyperparameterTuningJobRequest() - - -@pytest.mark.asyncio -async def test_cancel_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelHyperparameterTuningJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.cancel_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelHyperparameterTuningJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_cancel_hyperparameter_tuning_job_async_from_dict(): - await test_cancel_hyperparameter_tuning_job_async(request_type=dict) - - -def test_cancel_hyperparameter_tuning_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = job_service.CancelHyperparameterTuningJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_hyperparameter_tuning_job), - '__call__') as call: - call.return_value = None - client.cancel_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_cancel_hyperparameter_tuning_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CancelHyperparameterTuningJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_hyperparameter_tuning_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.cancel_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_cancel_hyperparameter_tuning_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.cancel_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.cancel_hyperparameter_tuning_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_cancel_hyperparameter_tuning_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.cancel_hyperparameter_tuning_job( - job_service.CancelHyperparameterTuningJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_cancel_hyperparameter_tuning_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.cancel_hyperparameter_tuning_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_cancel_hyperparameter_tuning_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.cancel_hyperparameter_tuning_job( - job_service.CancelHyperparameterTuningJobRequest(), - name='name_value', - ) - - -def test_create_batch_prediction_job(transport: str = 'grpc', request_type=job_service.CreateBatchPredictionJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_batch_prediction_job.BatchPredictionJob( - name='name_value', - display_name='display_name_value', - model='model_value', - generate_explanation=True, - state=job_state.JobState.JOB_STATE_QUEUED, - ) - response = client.create_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateBatchPredictionJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_batch_prediction_job.BatchPredictionJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.model == 'model_value' - assert response.generate_explanation is True - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -def test_create_batch_prediction_job_from_dict(): - test_create_batch_prediction_job(request_type=dict) - - -def test_create_batch_prediction_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_batch_prediction_job), - '__call__') as call: - client.create_batch_prediction_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateBatchPredictionJobRequest() - - -@pytest.mark.asyncio -async def test_create_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateBatchPredictionJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_batch_prediction_job.BatchPredictionJob( - name='name_value', - display_name='display_name_value', - model='model_value', - generate_explanation=True, - state=job_state.JobState.JOB_STATE_QUEUED, - )) - response = await client.create_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateBatchPredictionJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_batch_prediction_job.BatchPredictionJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.model == 'model_value' - assert response.generate_explanation is True - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -@pytest.mark.asyncio -async def test_create_batch_prediction_job_async_from_dict(): - await test_create_batch_prediction_job_async(request_type=dict) - - -def test_create_batch_prediction_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CreateBatchPredictionJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_batch_prediction_job), - '__call__') as call: - call.return_value = gca_batch_prediction_job.BatchPredictionJob() - client.create_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_batch_prediction_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CreateBatchPredictionJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_batch_prediction_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_batch_prediction_job.BatchPredictionJob()) - await client.create_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_batch_prediction_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_batch_prediction_job.BatchPredictionJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_batch_prediction_job( - parent='parent_value', - batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].batch_prediction_job - mock_val = gca_batch_prediction_job.BatchPredictionJob(name='name_value') - assert arg == mock_val - - -def test_create_batch_prediction_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_batch_prediction_job( - job_service.CreateBatchPredictionJobRequest(), - parent='parent_value', - batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_batch_prediction_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_batch_prediction_job.BatchPredictionJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_batch_prediction_job.BatchPredictionJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_batch_prediction_job( - parent='parent_value', - batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].batch_prediction_job - mock_val = gca_batch_prediction_job.BatchPredictionJob(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_batch_prediction_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_batch_prediction_job( - job_service.CreateBatchPredictionJobRequest(), - parent='parent_value', - batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'), - ) - - -def test_get_batch_prediction_job(transport: str = 'grpc', request_type=job_service.GetBatchPredictionJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = batch_prediction_job.BatchPredictionJob( - name='name_value', - display_name='display_name_value', - model='model_value', - generate_explanation=True, - state=job_state.JobState.JOB_STATE_QUEUED, - ) - response = client.get_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetBatchPredictionJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, batch_prediction_job.BatchPredictionJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.model == 'model_value' - assert response.generate_explanation is True - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -def test_get_batch_prediction_job_from_dict(): - test_get_batch_prediction_job(request_type=dict) - - -def test_get_batch_prediction_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_batch_prediction_job), - '__call__') as call: - client.get_batch_prediction_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetBatchPredictionJobRequest() - - -@pytest.mark.asyncio -async def test_get_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetBatchPredictionJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(batch_prediction_job.BatchPredictionJob( - name='name_value', - display_name='display_name_value', - model='model_value', - generate_explanation=True, - state=job_state.JobState.JOB_STATE_QUEUED, - )) - response = await client.get_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetBatchPredictionJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, batch_prediction_job.BatchPredictionJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.model == 'model_value' - assert response.generate_explanation is True - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -@pytest.mark.asyncio -async def test_get_batch_prediction_job_async_from_dict(): - await test_get_batch_prediction_job_async(request_type=dict) - - -def test_get_batch_prediction_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetBatchPredictionJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_batch_prediction_job), - '__call__') as call: - call.return_value = batch_prediction_job.BatchPredictionJob() - client.get_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_batch_prediction_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetBatchPredictionJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_batch_prediction_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(batch_prediction_job.BatchPredictionJob()) - await client.get_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_batch_prediction_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = batch_prediction_job.BatchPredictionJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_batch_prediction_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_batch_prediction_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_batch_prediction_job( - job_service.GetBatchPredictionJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_batch_prediction_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = batch_prediction_job.BatchPredictionJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(batch_prediction_job.BatchPredictionJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_batch_prediction_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_batch_prediction_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_batch_prediction_job( - job_service.GetBatchPredictionJobRequest(), - name='name_value', - ) - - -def test_list_batch_prediction_jobs(transport: str = 'grpc', request_type=job_service.ListBatchPredictionJobsRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_batch_prediction_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListBatchPredictionJobsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_batch_prediction_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListBatchPredictionJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListBatchPredictionJobsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_batch_prediction_jobs_from_dict(): - test_list_batch_prediction_jobs(request_type=dict) - - -def test_list_batch_prediction_jobs_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_batch_prediction_jobs), - '__call__') as call: - client.list_batch_prediction_jobs() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListBatchPredictionJobsRequest() - - -@pytest.mark.asyncio -async def test_list_batch_prediction_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListBatchPredictionJobsRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_batch_prediction_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListBatchPredictionJobsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_batch_prediction_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListBatchPredictionJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListBatchPredictionJobsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_batch_prediction_jobs_async_from_dict(): - await test_list_batch_prediction_jobs_async(request_type=dict) - - -def test_list_batch_prediction_jobs_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = job_service.ListBatchPredictionJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_batch_prediction_jobs), - '__call__') as call: - call.return_value = job_service.ListBatchPredictionJobsResponse() - client.list_batch_prediction_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_batch_prediction_jobs_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.ListBatchPredictionJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_batch_prediction_jobs), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListBatchPredictionJobsResponse()) - await client.list_batch_prediction_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_batch_prediction_jobs_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_batch_prediction_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListBatchPredictionJobsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_batch_prediction_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_batch_prediction_jobs_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_batch_prediction_jobs( - job_service.ListBatchPredictionJobsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_batch_prediction_jobs_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_batch_prediction_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListBatchPredictionJobsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListBatchPredictionJobsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_batch_prediction_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_batch_prediction_jobs_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_batch_prediction_jobs( - job_service.ListBatchPredictionJobsRequest(), - parent='parent_value', - ) - - -def test_list_batch_prediction_jobs_pager(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_batch_prediction_jobs), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - ], - next_page_token='abc', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[], - next_page_token='def', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - ], - next_page_token='ghi', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_batch_prediction_jobs(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, batch_prediction_job.BatchPredictionJob) - for i 
in results) - -def test_list_batch_prediction_jobs_pages(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_batch_prediction_jobs), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - ], - next_page_token='abc', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[], - next_page_token='def', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - ], - next_page_token='ghi', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - ], - ), - RuntimeError, - ) - pages = list(client.list_batch_prediction_jobs(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_batch_prediction_jobs_async_pager(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_batch_prediction_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - ], - next_page_token='abc', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[], - next_page_token='def', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - ], - next_page_token='ghi', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_batch_prediction_jobs(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, batch_prediction_job.BatchPredictionJob) - for i in responses) - -@pytest.mark.asyncio -async def test_list_batch_prediction_jobs_async_pages(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_batch_prediction_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - ], - next_page_token='abc', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[], - next_page_token='def', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - ], - next_page_token='ghi', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_batch_prediction_jobs(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_batch_prediction_job(transport: str = 'grpc', request_type=job_service.DeleteBatchPredictionJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteBatchPredictionJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_delete_batch_prediction_job_from_dict(): - test_delete_batch_prediction_job(request_type=dict) - - -def test_delete_batch_prediction_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_batch_prediction_job), - '__call__') as call: - client.delete_batch_prediction_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteBatchPredictionJobRequest() - - -@pytest.mark.asyncio -async def test_delete_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteBatchPredictionJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteBatchPredictionJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_batch_prediction_job_async_from_dict(): - await test_delete_batch_prediction_job_async(request_type=dict) - - -def test_delete_batch_prediction_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteBatchPredictionJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_batch_prediction_job), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_batch_prediction_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteBatchPredictionJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_batch_prediction_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_batch_prediction_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_batch_prediction_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_batch_prediction_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_batch_prediction_job( - job_service.DeleteBatchPredictionJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_batch_prediction_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_batch_prediction_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_batch_prediction_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_batch_prediction_job( - job_service.DeleteBatchPredictionJobRequest(), - name='name_value', - ) - - -def test_cancel_batch_prediction_job(transport: str = 'grpc', request_type=job_service.CancelBatchPredictionJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.cancel_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelBatchPredictionJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_cancel_batch_prediction_job_from_dict(): - test_cancel_batch_prediction_job(request_type=dict) - - -def test_cancel_batch_prediction_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_batch_prediction_job), - '__call__') as call: - client.cancel_batch_prediction_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelBatchPredictionJobRequest() - - -@pytest.mark.asyncio -async def test_cancel_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelBatchPredictionJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.cancel_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelBatchPredictionJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_cancel_batch_prediction_job_async_from_dict(): - await test_cancel_batch_prediction_job_async(request_type=dict) - - -def test_cancel_batch_prediction_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CancelBatchPredictionJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_batch_prediction_job), - '__call__') as call: - call.return_value = None - client.cancel_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_cancel_batch_prediction_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CancelBatchPredictionJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.cancel_batch_prediction_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.cancel_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_cancel_batch_prediction_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.cancel_batch_prediction_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_cancel_batch_prediction_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.cancel_batch_prediction_job( - job_service.CancelBatchPredictionJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_cancel_batch_prediction_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.cancel_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.cancel_batch_prediction_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_cancel_batch_prediction_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.cancel_batch_prediction_job( - job_service.CancelBatchPredictionJobRequest(), - name='name_value', - ) - - -def test_create_model_deployment_monitoring_job(transport: str = 'grpc', request_type=job_service.CreateModelDeploymentMonitoringJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob( - name='name_value', - display_name='display_name_value', - endpoint='endpoint_value', - state=job_state.JobState.JOB_STATE_QUEUED, - schedule_state=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, - predict_instance_schema_uri='predict_instance_schema_uri_value', - analysis_instance_schema_uri='analysis_instance_schema_uri_value', - enable_monitoring_pipeline_logs=True, - ) - response = client.create_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.endpoint == 'endpoint_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - assert response.schedule_state == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING - assert response.predict_instance_schema_uri == 'predict_instance_schema_uri_value' - assert response.analysis_instance_schema_uri == 'analysis_instance_schema_uri_value' - assert response.enable_monitoring_pipeline_logs is True - - -def test_create_model_deployment_monitoring_job_from_dict(): - test_create_model_deployment_monitoring_job(request_type=dict) - - -def test_create_model_deployment_monitoring_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model_deployment_monitoring_job), - '__call__') as call: - client.create_model_deployment_monitoring_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest() - - -@pytest.mark.asyncio -async def test_create_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateModelDeploymentMonitoringJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob( - name='name_value', - display_name='display_name_value', - endpoint='endpoint_value', - state=job_state.JobState.JOB_STATE_QUEUED, - schedule_state=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, - predict_instance_schema_uri='predict_instance_schema_uri_value', - analysis_instance_schema_uri='analysis_instance_schema_uri_value', - enable_monitoring_pipeline_logs=True, - )) - response = await client.create_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.endpoint == 'endpoint_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - assert response.schedule_state == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING - assert response.predict_instance_schema_uri == 'predict_instance_schema_uri_value' - assert response.analysis_instance_schema_uri == 'analysis_instance_schema_uri_value' - assert response.enable_monitoring_pipeline_logs is True - - -@pytest.mark.asyncio -async def test_create_model_deployment_monitoring_job_async_from_dict(): - await test_create_model_deployment_monitoring_job_async(request_type=dict) - - -def test_create_model_deployment_monitoring_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CreateModelDeploymentMonitoringJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob() - client.create_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_model_deployment_monitoring_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CreateModelDeploymentMonitoringJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()) - await client.create_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_model_deployment_monitoring_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.create_model_deployment_monitoring_job( - parent='parent_value', - model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].model_deployment_monitoring_job - mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value') - assert arg == mock_val - - -def test_create_model_deployment_monitoring_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_model_deployment_monitoring_job( - job_service.CreateModelDeploymentMonitoringJobRequest(), - parent='parent_value', - model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_model_deployment_monitoring_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.create_model_deployment_monitoring_job( - parent='parent_value', - model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].model_deployment_monitoring_job - mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_model_deployment_monitoring_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_model_deployment_monitoring_job( - job_service.CreateModelDeploymentMonitoringJobRequest(), - parent='parent_value', - model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), - ) - - -def test_search_model_deployment_monitoring_stats_anomalies(transport: str = 'grpc', request_type=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - next_page_token='next_page_token_value', - ) - response = client.search_model_deployment_monitoring_stats_anomalies(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_search_model_deployment_monitoring_stats_anomalies_from_dict(): - test_search_model_deployment_monitoring_stats_anomalies(request_type=dict) - - -def test_search_model_deployment_monitoring_stats_anomalies_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__') as call: - client.search_model_deployment_monitoring_stats_anomalies() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() - - -@pytest.mark.asyncio -async def test_search_model_deployment_monitoring_stats_anomalies_async(transport: str = 'grpc_asyncio', request_type=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - next_page_token='next_page_token_value', - )) - response = await client.search_model_deployment_monitoring_stats_anomalies(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_search_model_deployment_monitoring_stats_anomalies_async_from_dict(): - await test_search_model_deployment_monitoring_stats_anomalies_async(request_type=dict) - - -def test_search_model_deployment_monitoring_stats_anomalies_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() - - request.model_deployment_monitoring_job = 'model_deployment_monitoring_job/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__') as call: - call.return_value = job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse() - client.search_model_deployment_monitoring_stats_anomalies(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'model_deployment_monitoring_job=model_deployment_monitoring_job/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_search_model_deployment_monitoring_stats_anomalies_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() - - request.model_deployment_monitoring_job = 'model_deployment_monitoring_job/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()) - await client.search_model_deployment_monitoring_stats_anomalies(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'model_deployment_monitoring_job=model_deployment_monitoring_job/value', - ) in kw['metadata'] - - -def test_search_model_deployment_monitoring_stats_anomalies_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.search_model_deployment_monitoring_stats_anomalies( - model_deployment_monitoring_job='model_deployment_monitoring_job_value', - deployed_model_id='deployed_model_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].model_deployment_monitoring_job - mock_val = 'model_deployment_monitoring_job_value' - assert arg == mock_val - arg = args[0].deployed_model_id - mock_val = 'deployed_model_id_value' - assert arg == mock_val - - -def test_search_model_deployment_monitoring_stats_anomalies_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.search_model_deployment_monitoring_stats_anomalies( - job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(), - model_deployment_monitoring_job='model_deployment_monitoring_job_value', - deployed_model_id='deployed_model_id_value', - ) - - -@pytest.mark.asyncio -async def test_search_model_deployment_monitoring_stats_anomalies_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.search_model_deployment_monitoring_stats_anomalies( - model_deployment_monitoring_job='model_deployment_monitoring_job_value', - deployed_model_id='deployed_model_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].model_deployment_monitoring_job - mock_val = 'model_deployment_monitoring_job_value' - assert arg == mock_val - arg = args[0].deployed_model_id - mock_val = 'deployed_model_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_search_model_deployment_monitoring_stats_anomalies_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.search_model_deployment_monitoring_stats_anomalies( - job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(), - model_deployment_monitoring_job='model_deployment_monitoring_job_value', - deployed_model_id='deployed_model_id_value', - ) - - -def test_search_model_deployment_monitoring_stats_anomalies_pager(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - next_page_token='abc', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[], - next_page_token='def', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - next_page_token='ghi', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('model_deployment_monitoring_job', ''), - )), - ) - pager = client.search_model_deployment_monitoring_stats_anomalies(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies) - for i in results) - -def test_search_model_deployment_monitoring_stats_anomalies_pages(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - next_page_token='abc', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[], - next_page_token='def', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - next_page_token='ghi', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - ), - RuntimeError, - ) - pages = list(client.search_model_deployment_monitoring_stats_anomalies(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_search_model_deployment_monitoring_stats_anomalies_async_pager(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - next_page_token='abc', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[], - next_page_token='def', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - next_page_token='ghi', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - ), - RuntimeError, - ) - async_pager = await client.search_model_deployment_monitoring_stats_anomalies(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies) - for i in responses) - -@pytest.mark.asyncio -async def test_search_model_deployment_monitoring_stats_anomalies_async_pages(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - next_page_token='abc', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[], - next_page_token='def', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - next_page_token='ghi', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.search_model_deployment_monitoring_stats_anomalies(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_get_model_deployment_monitoring_job(transport: str = 'grpc', request_type=job_service.GetModelDeploymentMonitoringJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob( - name='name_value', - display_name='display_name_value', - endpoint='endpoint_value', - state=job_state.JobState.JOB_STATE_QUEUED, - schedule_state=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, - predict_instance_schema_uri='predict_instance_schema_uri_value', - analysis_instance_schema_uri='analysis_instance_schema_uri_value', - enable_monitoring_pipeline_logs=True, - ) - response = client.get_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, model_deployment_monitoring_job.ModelDeploymentMonitoringJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.endpoint == 'endpoint_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - assert response.schedule_state == model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING - assert response.predict_instance_schema_uri == 'predict_instance_schema_uri_value' - assert response.analysis_instance_schema_uri == 'analysis_instance_schema_uri_value' - assert response.enable_monitoring_pipeline_logs is True - - -def test_get_model_deployment_monitoring_job_from_dict(): - test_get_model_deployment_monitoring_job(request_type=dict) - - -def test_get_model_deployment_monitoring_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_deployment_monitoring_job), - '__call__') as call: - client.get_model_deployment_monitoring_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest() - - -@pytest.mark.asyncio -async def test_get_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetModelDeploymentMonitoringJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_deployment_monitoring_job.ModelDeploymentMonitoringJob( - name='name_value', - display_name='display_name_value', - endpoint='endpoint_value', - state=job_state.JobState.JOB_STATE_QUEUED, - schedule_state=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, - predict_instance_schema_uri='predict_instance_schema_uri_value', - analysis_instance_schema_uri='analysis_instance_schema_uri_value', - enable_monitoring_pipeline_logs=True, - )) - response = await client.get_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, model_deployment_monitoring_job.ModelDeploymentMonitoringJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.endpoint == 'endpoint_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - assert response.schedule_state == model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING - assert response.predict_instance_schema_uri == 'predict_instance_schema_uri_value' - assert response.analysis_instance_schema_uri == 'analysis_instance_schema_uri_value' - assert response.enable_monitoring_pipeline_logs is True - - -@pytest.mark.asyncio -async def test_get_model_deployment_monitoring_job_async_from_dict(): - await test_get_model_deployment_monitoring_job_async(request_type=dict) - - -def test_get_model_deployment_monitoring_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetModelDeploymentMonitoringJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob() - client.get_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_model_deployment_monitoring_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetModelDeploymentMonitoringJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_deployment_monitoring_job.ModelDeploymentMonitoringJob()) - await client.get_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_model_deployment_monitoring_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_model_deployment_monitoring_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_model_deployment_monitoring_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_model_deployment_monitoring_job( - job_service.GetModelDeploymentMonitoringJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_model_deployment_monitoring_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_deployment_monitoring_job.ModelDeploymentMonitoringJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_model_deployment_monitoring_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_model_deployment_monitoring_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_model_deployment_monitoring_job( - job_service.GetModelDeploymentMonitoringJobRequest(), - name='name_value', - ) - - -def test_list_model_deployment_monitoring_jobs(transport: str = 'grpc', request_type=job_service.ListModelDeploymentMonitoringJobsRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_deployment_monitoring_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_model_deployment_monitoring_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelDeploymentMonitoringJobsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_model_deployment_monitoring_jobs_from_dict(): - test_list_model_deployment_monitoring_jobs(request_type=dict) - - -def test_list_model_deployment_monitoring_jobs_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_model_deployment_monitoring_jobs), - '__call__') as call: - client.list_model_deployment_monitoring_jobs() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest() - - -@pytest.mark.asyncio -async def test_list_model_deployment_monitoring_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListModelDeploymentMonitoringJobsRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_deployment_monitoring_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListModelDeploymentMonitoringJobsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_model_deployment_monitoring_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListModelDeploymentMonitoringJobsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_model_deployment_monitoring_jobs_async_from_dict(): - await test_list_model_deployment_monitoring_jobs_async(request_type=dict) - - -def test_list_model_deployment_monitoring_jobs_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.ListModelDeploymentMonitoringJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_deployment_monitoring_jobs), - '__call__') as call: - call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse() - client.list_model_deployment_monitoring_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_model_deployment_monitoring_jobs_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.ListModelDeploymentMonitoringJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_model_deployment_monitoring_jobs), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListModelDeploymentMonitoringJobsResponse()) - await client.list_model_deployment_monitoring_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_model_deployment_monitoring_jobs_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_deployment_monitoring_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_model_deployment_monitoring_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_model_deployment_monitoring_jobs_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_model_deployment_monitoring_jobs( - job_service.ListModelDeploymentMonitoringJobsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_model_deployment_monitoring_jobs_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_deployment_monitoring_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListModelDeploymentMonitoringJobsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_model_deployment_monitoring_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_model_deployment_monitoring_jobs_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_model_deployment_monitoring_jobs( - job_service.ListModelDeploymentMonitoringJobsRequest(), - parent='parent_value', - ) - - -def test_list_model_deployment_monitoring_jobs_pager(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_model_deployment_monitoring_jobs), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - ], - next_page_token='abc', - ), - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[], - next_page_token='def', - ), - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - ], - next_page_token='ghi', - ), - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_model_deployment_monitoring_jobs(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, model_deployment_monitoring_job.ModelDeploymentMonitoringJob) - for i in results) - -def test_list_model_deployment_monitoring_jobs_pages(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_deployment_monitoring_jobs), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - ], - next_page_token='abc', - ), - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[], - next_page_token='def', - ), - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - ], - next_page_token='ghi', - ), - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - ], - ), - RuntimeError, - ) - pages = list(client.list_model_deployment_monitoring_jobs(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_model_deployment_monitoring_jobs_async_pager(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_deployment_monitoring_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - ], - next_page_token='abc', - ), - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[], - next_page_token='def', - ), - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - ], - next_page_token='ghi', - ), - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_model_deployment_monitoring_jobs(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, model_deployment_monitoring_job.ModelDeploymentMonitoringJob) - for i in responses) - -@pytest.mark.asyncio -async def test_list_model_deployment_monitoring_jobs_async_pages(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_deployment_monitoring_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - ], - next_page_token='abc', - ), - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[], - next_page_token='def', - ), - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - ], - next_page_token='ghi', - ), - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_model_deployment_monitoring_jobs(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_model_deployment_monitoring_job(transport: str = 'grpc', request_type=job_service.UpdateModelDeploymentMonitoringJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.update_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_update_model_deployment_monitoring_job_from_dict(): - test_update_model_deployment_monitoring_job(request_type=dict) - - -def test_update_model_deployment_monitoring_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model_deployment_monitoring_job), - '__call__') as call: - client.update_model_deployment_monitoring_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest() - - -@pytest.mark.asyncio -async def test_update_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.UpdateModelDeploymentMonitoringJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.update_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_update_model_deployment_monitoring_job_async_from_dict(): - await test_update_model_deployment_monitoring_job_async(request_type=dict) - - -def test_update_model_deployment_monitoring_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.UpdateModelDeploymentMonitoringJobRequest() - - request.model_deployment_monitoring_job.name = 'model_deployment_monitoring_job.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.update_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'model_deployment_monitoring_job.name=model_deployment_monitoring_job.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_model_deployment_monitoring_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.UpdateModelDeploymentMonitoringJobRequest() - - request.model_deployment_monitoring_job.name = 'model_deployment_monitoring_job.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.update_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'model_deployment_monitoring_job.name=model_deployment_monitoring_job.name/value', - ) in kw['metadata'] - - -def test_update_model_deployment_monitoring_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.update_model_deployment_monitoring_job( - model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].model_deployment_monitoring_job - mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_model_deployment_monitoring_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_model_deployment_monitoring_job( - job_service.UpdateModelDeploymentMonitoringJobRequest(), - model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_model_deployment_monitoring_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.update_model_deployment_monitoring_job( - model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].model_deployment_monitoring_job - mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_model_deployment_monitoring_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_model_deployment_monitoring_job( - job_service.UpdateModelDeploymentMonitoringJobRequest(), - model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_model_deployment_monitoring_job(transport: str = 'grpc', request_type=job_service.DeleteModelDeploymentMonitoringJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_model_deployment_monitoring_job_from_dict(): - test_delete_model_deployment_monitoring_job(request_type=dict) - - -def test_delete_model_deployment_monitoring_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model_deployment_monitoring_job), - '__call__') as call: - client.delete_model_deployment_monitoring_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest() - - -@pytest.mark.asyncio -async def test_delete_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteModelDeploymentMonitoringJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_model_deployment_monitoring_job_async_from_dict(): - await test_delete_model_deployment_monitoring_job_async(request_type=dict) - - -def test_delete_model_deployment_monitoring_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteModelDeploymentMonitoringJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_model_deployment_monitoring_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. 
Set these to a non-empty value. - request = job_service.DeleteModelDeploymentMonitoringJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_model_deployment_monitoring_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_model_deployment_monitoring_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_model_deployment_monitoring_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.delete_model_deployment_monitoring_job( - job_service.DeleteModelDeploymentMonitoringJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_model_deployment_monitoring_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_model_deployment_monitoring_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_model_deployment_monitoring_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.delete_model_deployment_monitoring_job( - job_service.DeleteModelDeploymentMonitoringJobRequest(), - name='name_value', - ) - - -def test_pause_model_deployment_monitoring_job(transport: str = 'grpc', request_type=job_service.PauseModelDeploymentMonitoringJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.pause_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.pause_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_pause_model_deployment_monitoring_job_from_dict(): - test_pause_model_deployment_monitoring_job(request_type=dict) - - -def test_pause_model_deployment_monitoring_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.pause_model_deployment_monitoring_job), - '__call__') as call: - client.pause_model_deployment_monitoring_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest() - - -@pytest.mark.asyncio -async def test_pause_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.PauseModelDeploymentMonitoringJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.pause_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.pause_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_pause_model_deployment_monitoring_job_async_from_dict(): - await test_pause_model_deployment_monitoring_job_async(request_type=dict) - - -def test_pause_model_deployment_monitoring_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = job_service.PauseModelDeploymentMonitoringJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.pause_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = None - client.pause_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_pause_model_deployment_monitoring_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.PauseModelDeploymentMonitoringJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.pause_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.pause_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_pause_model_deployment_monitoring_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.pause_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.pause_model_deployment_monitoring_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_pause_model_deployment_monitoring_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.pause_model_deployment_monitoring_job( - job_service.PauseModelDeploymentMonitoringJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_pause_model_deployment_monitoring_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.pause_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.pause_model_deployment_monitoring_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_pause_model_deployment_monitoring_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.pause_model_deployment_monitoring_job( - job_service.PauseModelDeploymentMonitoringJobRequest(), - name='name_value', - ) - - -def test_resume_model_deployment_monitoring_job(transport: str = 'grpc', request_type=job_service.ResumeModelDeploymentMonitoringJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.resume_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.resume_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_resume_model_deployment_monitoring_job_from_dict(): - test_resume_model_deployment_monitoring_job(request_type=dict) - - -def test_resume_model_deployment_monitoring_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.resume_model_deployment_monitoring_job), - '__call__') as call: - client.resume_model_deployment_monitoring_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest() - - -@pytest.mark.asyncio -async def test_resume_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.ResumeModelDeploymentMonitoringJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.resume_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.resume_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. 
- assert response is None - - -@pytest.mark.asyncio -async def test_resume_model_deployment_monitoring_job_async_from_dict(): - await test_resume_model_deployment_monitoring_job_async(request_type=dict) - - -def test_resume_model_deployment_monitoring_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.ResumeModelDeploymentMonitoringJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.resume_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = None - client.resume_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_resume_model_deployment_monitoring_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.ResumeModelDeploymentMonitoringJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.resume_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.resume_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_resume_model_deployment_monitoring_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.resume_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.resume_model_deployment_monitoring_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_resume_model_deployment_monitoring_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.resume_model_deployment_monitoring_job( - job_service.ResumeModelDeploymentMonitoringJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_resume_model_deployment_monitoring_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.resume_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.resume_model_deployment_monitoring_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_resume_model_deployment_monitoring_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.resume_model_deployment_monitoring_job( - job_service.ResumeModelDeploymentMonitoringJobRequest(), - name='name_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.JobServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.JobServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = JobServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. 
- transport = transports.JobServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = JobServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.JobServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = JobServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.JobServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.JobServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.JobServiceGrpcTransport, - transports.JobServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. 
- client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.JobServiceGrpcTransport, - ) - -def test_job_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.JobServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_job_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1.services.job_service.transports.JobServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.JobServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'create_custom_job', - 'get_custom_job', - 'list_custom_jobs', - 'delete_custom_job', - 'cancel_custom_job', - 'create_data_labeling_job', - 'get_data_labeling_job', - 'list_data_labeling_jobs', - 'delete_data_labeling_job', - 'cancel_data_labeling_job', - 'create_hyperparameter_tuning_job', - 'get_hyperparameter_tuning_job', - 'list_hyperparameter_tuning_jobs', - 'delete_hyperparameter_tuning_job', - 'cancel_hyperparameter_tuning_job', - 'create_batch_prediction_job', - 'get_batch_prediction_job', - 'list_batch_prediction_jobs', - 'delete_batch_prediction_job', - 'cancel_batch_prediction_job', - 'create_model_deployment_monitoring_job', - 'search_model_deployment_monitoring_stats_anomalies', - 'get_model_deployment_monitoring_job', - 'list_model_deployment_monitoring_jobs', - 'update_model_deployment_monitoring_job', - 'delete_model_deployment_monitoring_job', - 'pause_model_deployment_monitoring_job', - 'resume_model_deployment_monitoring_job', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, 
method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -def test_job_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.JobServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_job_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.JobServiceTransport() - adc.assert_called_once() - - -def test_job_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - JobServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.JobServiceGrpcTransport, - transports.JobServiceGrpcAsyncIOTransport, - ], -) -def test_job_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.JobServiceGrpcTransport, grpc_helpers), - (transports.JobServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_job_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport]) -def test_job_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. 
- with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_job_service_host_no_port(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_job_service_host_with_port(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_job_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.JobServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_job_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.JobServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport]) -def test_job_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio 
transport constructor. -@pytest.mark.parametrize("transport_class", [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport]) -def test_job_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_job_service_grpc_lro_client(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_job_service_grpc_lro_async_client(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_batch_prediction_job_path(): - project = "squid" - location = "clam" - batch_prediction_job = "whelk" - expected = "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format(project=project, location=location, batch_prediction_job=batch_prediction_job, ) - actual = JobServiceClient.batch_prediction_job_path(project, location, batch_prediction_job) - assert expected == actual - - -def test_parse_batch_prediction_job_path(): - expected = { - "project": "octopus", - "location": "oyster", - "batch_prediction_job": "nudibranch", - } - path = JobServiceClient.batch_prediction_job_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_batch_prediction_job_path(path) - assert expected == actual - -def test_custom_job_path(): - project = "cuttlefish" - location = "mussel" - custom_job = "winkle" - expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) - actual = JobServiceClient.custom_job_path(project, location, custom_job) - assert expected == actual - - -def test_parse_custom_job_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "custom_job": "abalone", - } - path = JobServiceClient.custom_job_path(**expected) - - # Check that the path construction is reversible. 
- actual = JobServiceClient.parse_custom_job_path(path) - assert expected == actual - -def test_data_labeling_job_path(): - project = "squid" - location = "clam" - data_labeling_job = "whelk" - expected = "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format(project=project, location=location, data_labeling_job=data_labeling_job, ) - actual = JobServiceClient.data_labeling_job_path(project, location, data_labeling_job) - assert expected == actual - - -def test_parse_data_labeling_job_path(): - expected = { - "project": "octopus", - "location": "oyster", - "data_labeling_job": "nudibranch", - } - path = JobServiceClient.data_labeling_job_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_data_labeling_job_path(path) - assert expected == actual - -def test_dataset_path(): - project = "cuttlefish" - location = "mussel" - dataset = "winkle" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - actual = JobServiceClient.dataset_path(project, location, dataset) - assert expected == actual - - -def test_parse_dataset_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "dataset": "abalone", - } - path = JobServiceClient.dataset_path(**expected) - - # Check that the path construction is reversible. 
- actual = JobServiceClient.parse_dataset_path(path) - assert expected == actual - -def test_endpoint_path(): - project = "squid" - location = "clam" - endpoint = "whelk" - expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - actual = JobServiceClient.endpoint_path(project, location, endpoint) - assert expected == actual - - -def test_parse_endpoint_path(): - expected = { - "project": "octopus", - "location": "oyster", - "endpoint": "nudibranch", - } - path = JobServiceClient.endpoint_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_endpoint_path(path) - assert expected == actual - -def test_hyperparameter_tuning_job_path(): - project = "cuttlefish" - location = "mussel" - hyperparameter_tuning_job = "winkle" - expected = "projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}".format(project=project, location=location, hyperparameter_tuning_job=hyperparameter_tuning_job, ) - actual = JobServiceClient.hyperparameter_tuning_job_path(project, location, hyperparameter_tuning_job) - assert expected == actual - - -def test_parse_hyperparameter_tuning_job_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "hyperparameter_tuning_job": "abalone", - } - path = JobServiceClient.hyperparameter_tuning_job_path(**expected) - - # Check that the path construction is reversible. 
- actual = JobServiceClient.parse_hyperparameter_tuning_job_path(path) - assert expected == actual - -def test_model_path(): - project = "squid" - location = "clam" - model = "whelk" - expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - actual = JobServiceClient.model_path(project, location, model) - assert expected == actual - - -def test_parse_model_path(): - expected = { - "project": "octopus", - "location": "oyster", - "model": "nudibranch", - } - path = JobServiceClient.model_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_model_path(path) - assert expected == actual - -def test_model_deployment_monitoring_job_path(): - project = "cuttlefish" - location = "mussel" - model_deployment_monitoring_job = "winkle" - expected = "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format(project=project, location=location, model_deployment_monitoring_job=model_deployment_monitoring_job, ) - actual = JobServiceClient.model_deployment_monitoring_job_path(project, location, model_deployment_monitoring_job) - assert expected == actual - - -def test_parse_model_deployment_monitoring_job_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "model_deployment_monitoring_job": "abalone", - } - path = JobServiceClient.model_deployment_monitoring_job_path(**expected) - - # Check that the path construction is reversible. 
- actual = JobServiceClient.parse_model_deployment_monitoring_job_path(path) - assert expected == actual - -def test_network_path(): - project = "squid" - network = "clam" - expected = "projects/{project}/global/networks/{network}".format(project=project, network=network, ) - actual = JobServiceClient.network_path(project, network) - assert expected == actual - - -def test_parse_network_path(): - expected = { - "project": "whelk", - "network": "octopus", - } - path = JobServiceClient.network_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_network_path(path) - assert expected == actual - -def test_tensorboard_path(): - project = "oyster" - location = "nudibranch" - tensorboard = "cuttlefish" - expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(project=project, location=location, tensorboard=tensorboard, ) - actual = JobServiceClient.tensorboard_path(project, location, tensorboard) - assert expected == actual - - -def test_parse_tensorboard_path(): - expected = { - "project": "mussel", - "location": "winkle", - "tensorboard": "nautilus", - } - path = JobServiceClient.tensorboard_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_tensorboard_path(path) - assert expected == actual - -def test_trial_path(): - project = "scallop" - location = "abalone" - study = "squid" - trial = "clam" - expected = "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, ) - actual = JobServiceClient.trial_path(project, location, study, trial) - assert expected == actual - - -def test_parse_trial_path(): - expected = { - "project": "whelk", - "location": "octopus", - "study": "oyster", - "trial": "nudibranch", - } - path = JobServiceClient.trial_path(**expected) - - # Check that the path construction is reversible. 
- actual = JobServiceClient.parse_trial_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "cuttlefish" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = JobServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "mussel", - } - path = JobServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "winkle" - expected = "folders/{folder}".format(folder=folder, ) - actual = JobServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "nautilus", - } - path = JobServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "scallop" - expected = "organizations/{organization}".format(organization=organization, ) - actual = JobServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "abalone", - } - path = JobServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. 
- actual = JobServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "squid" - expected = "projects/{project}".format(project=project, ) - actual = JobServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "clam", - } - path = JobServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "whelk" - location = "octopus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = JobServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - } - path = JobServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = JobServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.JobServiceTransport, '_prep_wrapped_messages') as prep: - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.JobServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = JobServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_metadata_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_metadata_service.py deleted file mode 100644 index 1f90070077..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_metadata_service.py +++ /dev/null @@ -1,9706 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1.services.metadata_service import MetadataServiceAsyncClient -from google.cloud.aiplatform_v1.services.metadata_service import MetadataServiceClient -from google.cloud.aiplatform_v1.services.metadata_service import pagers -from google.cloud.aiplatform_v1.services.metadata_service import transports -from google.cloud.aiplatform_v1.types import artifact -from google.cloud.aiplatform_v1.types import artifact as gca_artifact -from google.cloud.aiplatform_v1.types import context -from google.cloud.aiplatform_v1.types import context as gca_context -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import event -from google.cloud.aiplatform_v1.types import execution -from google.cloud.aiplatform_v1.types import execution as gca_execution -from google.cloud.aiplatform_v1.types import lineage_subgraph -from google.cloud.aiplatform_v1.types import metadata_schema -from google.cloud.aiplatform_v1.types import metadata_schema as gca_metadata_schema -from google.cloud.aiplatform_v1.types import metadata_service -from google.cloud.aiplatform_v1.types import metadata_store -from google.cloud.aiplatform_v1.types import metadata_store as gca_metadata_store -from google.cloud.aiplatform_v1.types import operation 
as gca_operation -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert MetadataServiceClient._get_default_mtls_endpoint(None) is None - assert MetadataServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert MetadataServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert MetadataServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert MetadataServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert MetadataServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - MetadataServiceClient, - MetadataServiceAsyncClient, -]) -def test_metadata_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = 
client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.MetadataServiceGrpcTransport, "grpc"), - (transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_metadata_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - MetadataServiceClient, - MetadataServiceAsyncClient, -]) -def test_metadata_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_metadata_service_client_get_transport_class(): - transport = MetadataServiceClient.get_transport_class() - available_transports = [ - transports.MetadataServiceGrpcTransport, - ] 
- assert transport in available_transports - - transport = MetadataServiceClient.get_transport_class("grpc") - assert transport == transports.MetadataServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc"), - (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(MetadataServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceClient)) -@mock.patch.object(MetadataServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceAsyncClient)) -def test_metadata_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(MetadataServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(MetadataServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc", "true"), - (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc", "false"), - (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(MetadataServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceClient)) -@mock.patch.object(MetadataServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_metadata_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc"), - (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_metadata_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc"), - (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_metadata_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_metadata_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1.services.metadata_service.transports.MetadataServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = MetadataServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_create_metadata_store(transport: str = 'grpc', request_type=metadata_service.CreateMetadataStoreRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateMetadataStoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_metadata_store_from_dict(): - test_create_metadata_store(request_type=dict) - - -def test_create_metadata_store_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_store), - '__call__') as call: - client.create_metadata_store() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateMetadataStoreRequest() - - -@pytest.mark.asyncio -async def test_create_metadata_store_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateMetadataStoreRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateMetadataStoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_metadata_store_async_from_dict(): - await test_create_metadata_store_async(request_type=dict) - - -def test_create_metadata_store_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.CreateMetadataStoreRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_store), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_metadata_store_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.CreateMetadataStoreRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_metadata_store), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_metadata_store_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_metadata_store( - parent='parent_value', - metadata_store=gca_metadata_store.MetadataStore(name='name_value'), - metadata_store_id='metadata_store_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].metadata_store - mock_val = gca_metadata_store.MetadataStore(name='name_value') - assert arg == mock_val - arg = args[0].metadata_store_id - mock_val = 'metadata_store_id_value' - assert arg == mock_val - - -def test_create_metadata_store_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_metadata_store( - metadata_service.CreateMetadataStoreRequest(), - parent='parent_value', - metadata_store=gca_metadata_store.MetadataStore(name='name_value'), - metadata_store_id='metadata_store_id_value', - ) - - -@pytest.mark.asyncio -async def test_create_metadata_store_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_metadata_store( - parent='parent_value', - metadata_store=gca_metadata_store.MetadataStore(name='name_value'), - metadata_store_id='metadata_store_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].metadata_store - mock_val = gca_metadata_store.MetadataStore(name='name_value') - assert arg == mock_val - arg = args[0].metadata_store_id - mock_val = 'metadata_store_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_metadata_store_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_metadata_store( - metadata_service.CreateMetadataStoreRequest(), - parent='parent_value', - metadata_store=gca_metadata_store.MetadataStore(name='name_value'), - metadata_store_id='metadata_store_id_value', - ) - - -def test_get_metadata_store(transport: str = 'grpc', request_type=metadata_service.GetMetadataStoreRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_store.MetadataStore( - name='name_value', - description='description_value', - ) - response = client.get_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetMetadataStoreRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, metadata_store.MetadataStore) - assert response.name == 'name_value' - assert response.description == 'description_value' - - -def test_get_metadata_store_from_dict(): - test_get_metadata_store(request_type=dict) - - -def test_get_metadata_store_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_store), - '__call__') as call: - client.get_metadata_store() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetMetadataStoreRequest() - - -@pytest.mark.asyncio -async def test_get_metadata_store_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetMetadataStoreRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_store.MetadataStore( - name='name_value', - description='description_value', - )) - response = await client.get_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetMetadataStoreRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, metadata_store.MetadataStore) - assert response.name == 'name_value' - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_get_metadata_store_async_from_dict(): - await test_get_metadata_store_async(request_type=dict) - - -def test_get_metadata_store_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.GetMetadataStoreRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_store), - '__call__') as call: - call.return_value = metadata_store.MetadataStore() - client.get_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_metadata_store_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.GetMetadataStoreRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_store), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_store.MetadataStore()) - await client.get_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_metadata_store_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_store.MetadataStore() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_metadata_store( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_metadata_store_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_metadata_store( - metadata_service.GetMetadataStoreRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_metadata_store_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = metadata_store.MetadataStore() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_store.MetadataStore()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_metadata_store( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_metadata_store_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_metadata_store( - metadata_service.GetMetadataStoreRequest(), - name='name_value', - ) - - -def test_list_metadata_stores(transport: str = 'grpc', request_type=metadata_service.ListMetadataStoresRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListMetadataStoresResponse( - next_page_token='next_page_token_value', - ) - response = client.list_metadata_stores(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListMetadataStoresRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListMetadataStoresPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_metadata_stores_from_dict(): - test_list_metadata_stores(request_type=dict) - - -def test_list_metadata_stores_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__') as call: - client.list_metadata_stores() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListMetadataStoresRequest() - - -@pytest.mark.asyncio -async def test_list_metadata_stores_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListMetadataStoresRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataStoresResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_metadata_stores(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListMetadataStoresRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListMetadataStoresAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_metadata_stores_async_from_dict(): - await test_list_metadata_stores_async(request_type=dict) - - -def test_list_metadata_stores_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.ListMetadataStoresRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__') as call: - call.return_value = metadata_service.ListMetadataStoresResponse() - client.list_metadata_stores(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_metadata_stores_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.ListMetadataStoresRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataStoresResponse()) - await client.list_metadata_stores(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_metadata_stores_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListMetadataStoresResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_metadata_stores( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_metadata_stores_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_metadata_stores( - metadata_service.ListMetadataStoresRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_metadata_stores_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListMetadataStoresResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataStoresResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_metadata_stores( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_metadata_stores_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_metadata_stores( - metadata_service.ListMetadataStoresRequest(), - parent='parent_value', - ) - - -def test_list_metadata_stores_pager(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - metadata_service.ListMetadataStoresResponse( - metadata_stores=[ - metadata_store.MetadataStore(), - metadata_store.MetadataStore(), - metadata_store.MetadataStore(), - ], - next_page_token='abc', - ), - metadata_service.ListMetadataStoresResponse( - metadata_stores=[], - next_page_token='def', - ), - metadata_service.ListMetadataStoresResponse( - metadata_stores=[ - metadata_store.MetadataStore(), - ], - next_page_token='ghi', - ), - metadata_service.ListMetadataStoresResponse( - metadata_stores=[ - metadata_store.MetadataStore(), - metadata_store.MetadataStore(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_metadata_stores(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, metadata_store.MetadataStore) - for i in results) - -def test_list_metadata_stores_pages(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - metadata_service.ListMetadataStoresResponse( - metadata_stores=[ - metadata_store.MetadataStore(), - metadata_store.MetadataStore(), - metadata_store.MetadataStore(), - ], - next_page_token='abc', - ), - metadata_service.ListMetadataStoresResponse( - metadata_stores=[], - next_page_token='def', - ), - metadata_service.ListMetadataStoresResponse( - metadata_stores=[ - metadata_store.MetadataStore(), - ], - next_page_token='ghi', - ), - metadata_service.ListMetadataStoresResponse( - metadata_stores=[ - metadata_store.MetadataStore(), - metadata_store.MetadataStore(), - ], - ), - RuntimeError, - ) - pages = list(client.list_metadata_stores(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_metadata_stores_async_pager(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - metadata_service.ListMetadataStoresResponse( - metadata_stores=[ - metadata_store.MetadataStore(), - metadata_store.MetadataStore(), - metadata_store.MetadataStore(), - ], - next_page_token='abc', - ), - metadata_service.ListMetadataStoresResponse( - metadata_stores=[], - next_page_token='def', - ), - metadata_service.ListMetadataStoresResponse( - metadata_stores=[ - metadata_store.MetadataStore(), - ], - next_page_token='ghi', - ), - metadata_service.ListMetadataStoresResponse( - metadata_stores=[ - metadata_store.MetadataStore(), - metadata_store.MetadataStore(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_metadata_stores(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, metadata_store.MetadataStore) - for i in responses) - -@pytest.mark.asyncio -async def test_list_metadata_stores_async_pages(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - metadata_service.ListMetadataStoresResponse( - metadata_stores=[ - metadata_store.MetadataStore(), - metadata_store.MetadataStore(), - metadata_store.MetadataStore(), - ], - next_page_token='abc', - ), - metadata_service.ListMetadataStoresResponse( - metadata_stores=[], - next_page_token='def', - ), - metadata_service.ListMetadataStoresResponse( - metadata_stores=[ - metadata_store.MetadataStore(), - ], - next_page_token='ghi', - ), - metadata_service.ListMetadataStoresResponse( - metadata_stores=[ - metadata_store.MetadataStore(), - metadata_store.MetadataStore(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_metadata_stores(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_metadata_store(transport: str = 'grpc', request_type=metadata_service.DeleteMetadataStoreRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteMetadataStoreRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_delete_metadata_store_from_dict(): - test_delete_metadata_store(request_type=dict) - - -def test_delete_metadata_store_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_metadata_store), - '__call__') as call: - client.delete_metadata_store() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteMetadataStoreRequest() - - -@pytest.mark.asyncio -async def test_delete_metadata_store_async(transport: str = 'grpc_asyncio', request_type=metadata_service.DeleteMetadataStoreRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteMetadataStoreRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_metadata_store_async_from_dict(): - await test_delete_metadata_store_async(request_type=dict) - - -def test_delete_metadata_store_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.DeleteMetadataStoreRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_metadata_store), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_metadata_store_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.DeleteMetadataStoreRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_metadata_store), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_metadata_store_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_metadata_store( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_metadata_store_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_metadata_store( - metadata_service.DeleteMetadataStoreRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_metadata_store_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_metadata_store( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_metadata_store_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_metadata_store( - metadata_service.DeleteMetadataStoreRequest(), - name='name_value', - ) - - -def test_create_artifact(transport: str = 'grpc', request_type=metadata_service.CreateArtifactRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = gca_artifact.Artifact( - name='name_value', - display_name='display_name_value', - uri='uri_value', - etag='etag_value', - state=gca_artifact.Artifact.State.PENDING, - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - ) - response = client.create_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateArtifactRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_artifact.Artifact) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.uri == 'uri_value' - assert response.etag == 'etag_value' - assert response.state == gca_artifact.Artifact.State.PENDING - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -def test_create_artifact_from_dict(): - test_create_artifact(request_type=dict) - - -def test_create_artifact_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_artifact), - '__call__') as call: - client.create_artifact() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateArtifactRequest() - - -@pytest.mark.asyncio -async def test_create_artifact_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateArtifactRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact( - name='name_value', - display_name='display_name_value', - uri='uri_value', - etag='etag_value', - state=gca_artifact.Artifact.State.PENDING, - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - )) - response = await client.create_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateArtifactRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_artifact.Artifact) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.uri == 'uri_value' - assert response.etag == 'etag_value' - assert response.state == gca_artifact.Artifact.State.PENDING - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_create_artifact_async_from_dict(): - await test_create_artifact_async(request_type=dict) - - -def test_create_artifact_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.CreateArtifactRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_artifact), - '__call__') as call: - call.return_value = gca_artifact.Artifact() - client.create_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_artifact_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.CreateArtifactRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_artifact), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact()) - await client.create_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_artifact_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_artifact.Artifact() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_artifact( - parent='parent_value', - artifact=gca_artifact.Artifact(name='name_value'), - artifact_id='artifact_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].artifact - mock_val = gca_artifact.Artifact(name='name_value') - assert arg == mock_val - arg = args[0].artifact_id - mock_val = 'artifact_id_value' - assert arg == mock_val - - -def test_create_artifact_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_artifact( - metadata_service.CreateArtifactRequest(), - parent='parent_value', - artifact=gca_artifact.Artifact(name='name_value'), - artifact_id='artifact_id_value', - ) - - -@pytest.mark.asyncio -async def test_create_artifact_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_artifact.Artifact() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_artifact( - parent='parent_value', - artifact=gca_artifact.Artifact(name='name_value'), - artifact_id='artifact_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].artifact - mock_val = gca_artifact.Artifact(name='name_value') - assert arg == mock_val - arg = args[0].artifact_id - mock_val = 'artifact_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_artifact_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_artifact( - metadata_service.CreateArtifactRequest(), - parent='parent_value', - artifact=gca_artifact.Artifact(name='name_value'), - artifact_id='artifact_id_value', - ) - - -def test_get_artifact(transport: str = 'grpc', request_type=metadata_service.GetArtifactRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = artifact.Artifact( - name='name_value', - display_name='display_name_value', - uri='uri_value', - etag='etag_value', - state=artifact.Artifact.State.PENDING, - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - ) - response = client.get_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetArtifactRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, artifact.Artifact) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.uri == 'uri_value' - assert response.etag == 'etag_value' - assert response.state == artifact.Artifact.State.PENDING - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -def test_get_artifact_from_dict(): - test_get_artifact(request_type=dict) - - -def test_get_artifact_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_artifact), - '__call__') as call: - client.get_artifact() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetArtifactRequest() - - -@pytest.mark.asyncio -async def test_get_artifact_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetArtifactRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(artifact.Artifact( - name='name_value', - display_name='display_name_value', - uri='uri_value', - etag='etag_value', - state=artifact.Artifact.State.PENDING, - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - )) - response = await client.get_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetArtifactRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, artifact.Artifact) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.uri == 'uri_value' - assert response.etag == 'etag_value' - assert response.state == artifact.Artifact.State.PENDING - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_get_artifact_async_from_dict(): - await test_get_artifact_async(request_type=dict) - - -def test_get_artifact_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.GetArtifactRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_artifact), - '__call__') as call: - call.return_value = artifact.Artifact() - client.get_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_artifact_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.GetArtifactRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_artifact), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(artifact.Artifact()) - await client.get_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_artifact_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = artifact.Artifact() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_artifact( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_artifact_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_artifact( - metadata_service.GetArtifactRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_artifact_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = artifact.Artifact() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(artifact.Artifact()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_artifact( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_artifact_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_artifact( - metadata_service.GetArtifactRequest(), - name='name_value', - ) - - -def test_list_artifacts(transport: str = 'grpc', request_type=metadata_service.ListArtifactsRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_artifacts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListArtifactsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_artifacts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListArtifactsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListArtifactsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_artifacts_from_dict(): - test_list_artifacts(request_type=dict) - - -def test_list_artifacts_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_artifacts), - '__call__') as call: - client.list_artifacts() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListArtifactsRequest() - - -@pytest.mark.asyncio -async def test_list_artifacts_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListArtifactsRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_artifacts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListArtifactsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_artifacts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListArtifactsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListArtifactsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_artifacts_async_from_dict(): - await test_list_artifacts_async(request_type=dict) - - -def test_list_artifacts_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = metadata_service.ListArtifactsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_artifacts), - '__call__') as call: - call.return_value = metadata_service.ListArtifactsResponse() - client.list_artifacts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_artifacts_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.ListArtifactsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_artifacts), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListArtifactsResponse()) - await client.list_artifacts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_artifacts_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_artifacts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListArtifactsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_artifacts( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_artifacts_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_artifacts( - metadata_service.ListArtifactsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_artifacts_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_artifacts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListArtifactsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListArtifactsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_artifacts( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_artifacts_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_artifacts( - metadata_service.ListArtifactsRequest(), - parent='parent_value', - ) - - -def test_list_artifacts_pager(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_artifacts), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - metadata_service.ListArtifactsResponse( - artifacts=[ - artifact.Artifact(), - artifact.Artifact(), - artifact.Artifact(), - ], - next_page_token='abc', - ), - metadata_service.ListArtifactsResponse( - artifacts=[], - next_page_token='def', - ), - metadata_service.ListArtifactsResponse( - artifacts=[ - artifact.Artifact(), - ], - next_page_token='ghi', - ), - metadata_service.ListArtifactsResponse( - artifacts=[ - artifact.Artifact(), - artifact.Artifact(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_artifacts(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, artifact.Artifact) - for i in results) - -def test_list_artifacts_pages(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_artifacts), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - metadata_service.ListArtifactsResponse( - artifacts=[ - artifact.Artifact(), - artifact.Artifact(), - artifact.Artifact(), - ], - next_page_token='abc', - ), - metadata_service.ListArtifactsResponse( - artifacts=[], - next_page_token='def', - ), - metadata_service.ListArtifactsResponse( - artifacts=[ - artifact.Artifact(), - ], - next_page_token='ghi', - ), - metadata_service.ListArtifactsResponse( - artifacts=[ - artifact.Artifact(), - artifact.Artifact(), - ], - ), - RuntimeError, - ) - pages = list(client.list_artifacts(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_artifacts_async_pager(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_artifacts), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - metadata_service.ListArtifactsResponse( - artifacts=[ - artifact.Artifact(), - artifact.Artifact(), - artifact.Artifact(), - ], - next_page_token='abc', - ), - metadata_service.ListArtifactsResponse( - artifacts=[], - next_page_token='def', - ), - metadata_service.ListArtifactsResponse( - artifacts=[ - artifact.Artifact(), - ], - next_page_token='ghi', - ), - metadata_service.ListArtifactsResponse( - artifacts=[ - artifact.Artifact(), - artifact.Artifact(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_artifacts(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, artifact.Artifact) - for i in responses) - -@pytest.mark.asyncio -async def test_list_artifacts_async_pages(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_artifacts), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - metadata_service.ListArtifactsResponse( - artifacts=[ - artifact.Artifact(), - artifact.Artifact(), - artifact.Artifact(), - ], - next_page_token='abc', - ), - metadata_service.ListArtifactsResponse( - artifacts=[], - next_page_token='def', - ), - metadata_service.ListArtifactsResponse( - artifacts=[ - artifact.Artifact(), - ], - next_page_token='ghi', - ), - metadata_service.ListArtifactsResponse( - artifacts=[ - artifact.Artifact(), - artifact.Artifact(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_artifacts(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_artifact(transport: str = 'grpc', request_type=metadata_service.UpdateArtifactRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_artifact.Artifact( - name='name_value', - display_name='display_name_value', - uri='uri_value', - etag='etag_value', - state=gca_artifact.Artifact.State.PENDING, - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - ) - response = client.update_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.UpdateArtifactRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_artifact.Artifact) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.uri == 'uri_value' - assert response.etag == 'etag_value' - assert response.state == gca_artifact.Artifact.State.PENDING - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -def test_update_artifact_from_dict(): - test_update_artifact(request_type=dict) - - -def test_update_artifact_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_artifact), - '__call__') as call: - client.update_artifact() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.UpdateArtifactRequest() - - -@pytest.mark.asyncio -async def test_update_artifact_async(transport: str = 'grpc_asyncio', request_type=metadata_service.UpdateArtifactRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact( - name='name_value', - display_name='display_name_value', - uri='uri_value', - etag='etag_value', - state=gca_artifact.Artifact.State.PENDING, - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - )) - response = await client.update_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.UpdateArtifactRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_artifact.Artifact) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.uri == 'uri_value' - assert response.etag == 'etag_value' - assert response.state == gca_artifact.Artifact.State.PENDING - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_update_artifact_async_from_dict(): - await test_update_artifact_async(request_type=dict) - - -def test_update_artifact_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.UpdateArtifactRequest() - - request.artifact.name = 'artifact.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_artifact), - '__call__') as call: - call.return_value = gca_artifact.Artifact() - client.update_artifact(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'artifact.name=artifact.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_artifact_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.UpdateArtifactRequest() - - request.artifact.name = 'artifact.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_artifact), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact()) - await client.update_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'artifact.name=artifact.name/value', - ) in kw['metadata'] - - -def test_update_artifact_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_artifact.Artifact() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.update_artifact( - artifact=gca_artifact.Artifact(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].artifact - mock_val = gca_artifact.Artifact(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_artifact_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_artifact( - metadata_service.UpdateArtifactRequest(), - artifact=gca_artifact.Artifact(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_artifact_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_artifact.Artifact() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_artifact( - artifact=gca_artifact.Artifact(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].artifact - mock_val = gca_artifact.Artifact(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_artifact_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_artifact( - metadata_service.UpdateArtifactRequest(), - artifact=gca_artifact.Artifact(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_artifact(transport: str = 'grpc', request_type=metadata_service.DeleteArtifactRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteArtifactRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_delete_artifact_from_dict(): - test_delete_artifact(request_type=dict) - - -def test_delete_artifact_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_artifact), - '__call__') as call: - client.delete_artifact() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteArtifactRequest() - - -@pytest.mark.asyncio -async def test_delete_artifact_async(transport: str = 'grpc_asyncio', request_type=metadata_service.DeleteArtifactRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteArtifactRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_artifact_async_from_dict(): - await test_delete_artifact_async(request_type=dict) - - -def test_delete_artifact_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.DeleteArtifactRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_artifact), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_artifact_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.DeleteArtifactRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_artifact), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_artifact(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_artifact_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_artifact( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_artifact_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_artifact( - metadata_service.DeleteArtifactRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_artifact_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_artifact( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_artifact_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_artifact( - metadata_service.DeleteArtifactRequest(), - name='name_value', - ) - - -def test_purge_artifacts(transport: str = 'grpc', request_type=metadata_service.PurgeArtifactsRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_artifacts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.purge_artifacts(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.PurgeArtifactsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_purge_artifacts_from_dict(): - test_purge_artifacts(request_type=dict) - - -def test_purge_artifacts_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_artifacts), - '__call__') as call: - client.purge_artifacts() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.PurgeArtifactsRequest() - - -@pytest.mark.asyncio -async def test_purge_artifacts_async(transport: str = 'grpc_asyncio', request_type=metadata_service.PurgeArtifactsRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_artifacts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.purge_artifacts(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.PurgeArtifactsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_purge_artifacts_async_from_dict(): - await test_purge_artifacts_async(request_type=dict) - - -def test_purge_artifacts_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.PurgeArtifactsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_artifacts), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.purge_artifacts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_purge_artifacts_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.PurgeArtifactsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.purge_artifacts), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.purge_artifacts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_purge_artifacts_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_artifacts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.purge_artifacts( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_purge_artifacts_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.purge_artifacts( - metadata_service.PurgeArtifactsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_purge_artifacts_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_artifacts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.purge_artifacts( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_purge_artifacts_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.purge_artifacts( - metadata_service.PurgeArtifactsRequest(), - parent='parent_value', - ) - - -def test_create_context(transport: str = 'grpc', request_type=metadata_service.CreateContextRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_context.Context( - name='name_value', - display_name='display_name_value', - etag='etag_value', - parent_contexts=['parent_contexts_value'], - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - ) - response = client.create_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateContextRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_context.Context) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.etag == 'etag_value' - assert response.parent_contexts == ['parent_contexts_value'] - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -def test_create_context_from_dict(): - test_create_context(request_type=dict) - - -def test_create_context_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_context), - '__call__') as call: - client.create_context() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateContextRequest() - - -@pytest.mark.asyncio -async def test_create_context_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateContextRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context( - name='name_value', - display_name='display_name_value', - etag='etag_value', - parent_contexts=['parent_contexts_value'], - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - )) - response = await client.create_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateContextRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_context.Context) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.etag == 'etag_value' - assert response.parent_contexts == ['parent_contexts_value'] - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_create_context_async_from_dict(): - await test_create_context_async(request_type=dict) - - -def test_create_context_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.CreateContextRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_context), - '__call__') as call: - call.return_value = gca_context.Context() - client.create_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_context_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.CreateContextRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_context), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context()) - await client.create_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_context_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_context.Context() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_context( - parent='parent_value', - context=gca_context.Context(name='name_value'), - context_id='context_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].context - mock_val = gca_context.Context(name='name_value') - assert arg == mock_val - arg = args[0].context_id - mock_val = 'context_id_value' - assert arg == mock_val - - -def test_create_context_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_context( - metadata_service.CreateContextRequest(), - parent='parent_value', - context=gca_context.Context(name='name_value'), - context_id='context_id_value', - ) - - -@pytest.mark.asyncio -async def test_create_context_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_context.Context() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_context( - parent='parent_value', - context=gca_context.Context(name='name_value'), - context_id='context_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].context - mock_val = gca_context.Context(name='name_value') - assert arg == mock_val - arg = args[0].context_id - mock_val = 'context_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_context_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_context( - metadata_service.CreateContextRequest(), - parent='parent_value', - context=gca_context.Context(name='name_value'), - context_id='context_id_value', - ) - - -def test_get_context(transport: str = 'grpc', request_type=metadata_service.GetContextRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = context.Context( - name='name_value', - display_name='display_name_value', - etag='etag_value', - parent_contexts=['parent_contexts_value'], - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - ) - response = client.get_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetContextRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, context.Context) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.etag == 'etag_value' - assert response.parent_contexts == ['parent_contexts_value'] - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -def test_get_context_from_dict(): - test_get_context(request_type=dict) - - -def test_get_context_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_context), - '__call__') as call: - client.get_context() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetContextRequest() - - -@pytest.mark.asyncio -async def test_get_context_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetContextRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_context), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(context.Context( - name='name_value', - display_name='display_name_value', - etag='etag_value', - parent_contexts=['parent_contexts_value'], - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - )) - response = await client.get_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetContextRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, context.Context) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.etag == 'etag_value' - assert response.parent_contexts == ['parent_contexts_value'] - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_get_context_async_from_dict(): - await test_get_context_async(request_type=dict) - - -def test_get_context_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.GetContextRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_context), - '__call__') as call: - call.return_value = context.Context() - client.get_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_context_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.GetContextRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_context), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(context.Context()) - await client.get_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_context_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = context.Context() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_context( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_context_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_context( - metadata_service.GetContextRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_context_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = context.Context() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(context.Context()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_context( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_context_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_context( - metadata_service.GetContextRequest(), - name='name_value', - ) - - -def test_list_contexts(transport: str = 'grpc', request_type=metadata_service.ListContextsRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_contexts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListContextsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_contexts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListContextsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListContextsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_contexts_from_dict(): - test_list_contexts(request_type=dict) - - -def test_list_contexts_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_contexts), - '__call__') as call: - client.list_contexts() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListContextsRequest() - - -@pytest.mark.asyncio -async def test_list_contexts_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListContextsRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_contexts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListContextsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_contexts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListContextsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListContextsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_contexts_async_from_dict(): - await test_list_contexts_async(request_type=dict) - - -def test_list_contexts_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.ListContextsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_contexts), - '__call__') as call: - call.return_value = metadata_service.ListContextsResponse() - client.list_contexts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_contexts_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.ListContextsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_contexts), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListContextsResponse()) - await client.list_contexts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_contexts_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_contexts), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = metadata_service.ListContextsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_contexts( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_contexts_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_contexts( - metadata_service.ListContextsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_contexts_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_contexts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListContextsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListContextsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_contexts( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_contexts_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_contexts( - metadata_service.ListContextsRequest(), - parent='parent_value', - ) - - -def test_list_contexts_pager(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_contexts), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - metadata_service.ListContextsResponse( - contexts=[ - context.Context(), - context.Context(), - context.Context(), - ], - next_page_token='abc', - ), - metadata_service.ListContextsResponse( - contexts=[], - next_page_token='def', - ), - metadata_service.ListContextsResponse( - contexts=[ - context.Context(), - ], - next_page_token='ghi', - ), - metadata_service.ListContextsResponse( - contexts=[ - context.Context(), - context.Context(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_contexts(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, context.Context) - for i in results) - -def test_list_contexts_pages(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_contexts), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - metadata_service.ListContextsResponse( - contexts=[ - context.Context(), - context.Context(), - context.Context(), - ], - next_page_token='abc', - ), - metadata_service.ListContextsResponse( - contexts=[], - next_page_token='def', - ), - metadata_service.ListContextsResponse( - contexts=[ - context.Context(), - ], - next_page_token='ghi', - ), - metadata_service.ListContextsResponse( - contexts=[ - context.Context(), - context.Context(), - ], - ), - RuntimeError, - ) - pages = list(client.list_contexts(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_contexts_async_pager(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_contexts), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - metadata_service.ListContextsResponse( - contexts=[ - context.Context(), - context.Context(), - context.Context(), - ], - next_page_token='abc', - ), - metadata_service.ListContextsResponse( - contexts=[], - next_page_token='def', - ), - metadata_service.ListContextsResponse( - contexts=[ - context.Context(), - ], - next_page_token='ghi', - ), - metadata_service.ListContextsResponse( - contexts=[ - context.Context(), - context.Context(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_contexts(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, context.Context) - for i in responses) - -@pytest.mark.asyncio -async def test_list_contexts_async_pages(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_contexts), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - metadata_service.ListContextsResponse( - contexts=[ - context.Context(), - context.Context(), - context.Context(), - ], - next_page_token='abc', - ), - metadata_service.ListContextsResponse( - contexts=[], - next_page_token='def', - ), - metadata_service.ListContextsResponse( - contexts=[ - context.Context(), - ], - next_page_token='ghi', - ), - metadata_service.ListContextsResponse( - contexts=[ - context.Context(), - context.Context(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_contexts(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_context(transport: str = 'grpc', request_type=metadata_service.UpdateContextRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_context.Context( - name='name_value', - display_name='display_name_value', - etag='etag_value', - parent_contexts=['parent_contexts_value'], - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - ) - response = client.update_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.UpdateContextRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_context.Context) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.etag == 'etag_value' - assert response.parent_contexts == ['parent_contexts_value'] - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -def test_update_context_from_dict(): - test_update_context(request_type=dict) - - -def test_update_context_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_context), - '__call__') as call: - client.update_context() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.UpdateContextRequest() - - -@pytest.mark.asyncio -async def test_update_context_async(transport: str = 'grpc_asyncio', request_type=metadata_service.UpdateContextRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_context), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context( - name='name_value', - display_name='display_name_value', - etag='etag_value', - parent_contexts=['parent_contexts_value'], - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - )) - response = await client.update_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.UpdateContextRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_context.Context) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.etag == 'etag_value' - assert response.parent_contexts == ['parent_contexts_value'] - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_update_context_async_from_dict(): - await test_update_context_async(request_type=dict) - - -def test_update_context_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.UpdateContextRequest() - - request.context.name = 'context.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_context), - '__call__') as call: - call.return_value = gca_context.Context() - client.update_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'context.name=context.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_context_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.UpdateContextRequest() - - request.context.name = 'context.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_context), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context()) - await client.update_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'context.name=context.name/value', - ) in kw['metadata'] - - -def test_update_context_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_context.Context() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_context( - context=gca_context.Context(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].context - mock_val = gca_context.Context(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_context_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_context( - metadata_service.UpdateContextRequest(), - context=gca_context.Context(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_context_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_context.Context() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_context( - context=gca_context.Context(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].context - mock_val = gca_context.Context(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_context_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_context( - metadata_service.UpdateContextRequest(), - context=gca_context.Context(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_context(transport: str = 'grpc', request_type=metadata_service.DeleteContextRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteContextRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_delete_context_from_dict(): - test_delete_context(request_type=dict) - - -def test_delete_context_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_context), - '__call__') as call: - client.delete_context() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteContextRequest() - - -@pytest.mark.asyncio -async def test_delete_context_async(transport: str = 'grpc_asyncio', request_type=metadata_service.DeleteContextRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteContextRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_context_async_from_dict(): - await test_delete_context_async(request_type=dict) - - -def test_delete_context_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.DeleteContextRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_context), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_context_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.DeleteContextRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_context), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_context_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_context( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_context_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_context( - metadata_service.DeleteContextRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_context_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.delete_context( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_context_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_context( - metadata_service.DeleteContextRequest(), - name='name_value', - ) - - -def test_purge_contexts(transport: str = 'grpc', request_type=metadata_service.PurgeContextsRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_contexts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.purge_contexts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.PurgeContextsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_purge_contexts_from_dict(): - test_purge_contexts(request_type=dict) - - -def test_purge_contexts_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_contexts), - '__call__') as call: - client.purge_contexts() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.PurgeContextsRequest() - - -@pytest.mark.asyncio -async def test_purge_contexts_async(transport: str = 'grpc_asyncio', request_type=metadata_service.PurgeContextsRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_contexts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.purge_contexts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.PurgeContextsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_purge_contexts_async_from_dict(): - await test_purge_contexts_async(request_type=dict) - - -def test_purge_contexts_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = metadata_service.PurgeContextsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_contexts), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.purge_contexts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_purge_contexts_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.PurgeContextsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_contexts), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.purge_contexts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_purge_contexts_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.purge_contexts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.purge_contexts( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_purge_contexts_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.purge_contexts( - metadata_service.PurgeContextsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_purge_contexts_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_contexts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.purge_contexts( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_purge_contexts_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.purge_contexts( - metadata_service.PurgeContextsRequest(), - parent='parent_value', - ) - - -def test_add_context_artifacts_and_executions(transport: str = 'grpc', request_type=metadata_service.AddContextArtifactsAndExecutionsRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_context_artifacts_and_executions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse( - ) - response = client.add_context_artifacts_and_executions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, metadata_service.AddContextArtifactsAndExecutionsResponse) - - -def test_add_context_artifacts_and_executions_from_dict(): - test_add_context_artifacts_and_executions(request_type=dict) - - -def test_add_context_artifacts_and_executions_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_context_artifacts_and_executions), - '__call__') as call: - client.add_context_artifacts_and_executions() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest() - - -@pytest.mark.asyncio -async def test_add_context_artifacts_and_executions_async(transport: str = 'grpc_asyncio', request_type=metadata_service.AddContextArtifactsAndExecutionsRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_context_artifacts_and_executions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextArtifactsAndExecutionsResponse( - )) - response = await client.add_context_artifacts_and_executions(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, metadata_service.AddContextArtifactsAndExecutionsResponse) - - -@pytest.mark.asyncio -async def test_add_context_artifacts_and_executions_async_from_dict(): - await test_add_context_artifacts_and_executions_async(request_type=dict) - - -def test_add_context_artifacts_and_executions_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.AddContextArtifactsAndExecutionsRequest() - - request.context = 'context/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_context_artifacts_and_executions), - '__call__') as call: - call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse() - client.add_context_artifacts_and_executions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'context=context/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_add_context_artifacts_and_executions_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.AddContextArtifactsAndExecutionsRequest() - - request.context = 'context/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.add_context_artifacts_and_executions), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextArtifactsAndExecutionsResponse()) - await client.add_context_artifacts_and_executions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'context=context/value', - ) in kw['metadata'] - - -def test_add_context_artifacts_and_executions_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_context_artifacts_and_executions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.add_context_artifacts_and_executions( - context='context_value', - artifacts=['artifacts_value'], - executions=['executions_value'], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].context - mock_val = 'context_value' - assert arg == mock_val - arg = args[0].artifacts - mock_val = ['artifacts_value'] - assert arg == mock_val - arg = args[0].executions - mock_val = ['executions_value'] - assert arg == mock_val - - -def test_add_context_artifacts_and_executions_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.add_context_artifacts_and_executions( - metadata_service.AddContextArtifactsAndExecutionsRequest(), - context='context_value', - artifacts=['artifacts_value'], - executions=['executions_value'], - ) - - -@pytest.mark.asyncio -async def test_add_context_artifacts_and_executions_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_context_artifacts_and_executions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextArtifactsAndExecutionsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.add_context_artifacts_and_executions( - context='context_value', - artifacts=['artifacts_value'], - executions=['executions_value'], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].context - mock_val = 'context_value' - assert arg == mock_val - arg = args[0].artifacts - mock_val = ['artifacts_value'] - assert arg == mock_val - arg = args[0].executions - mock_val = ['executions_value'] - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_add_context_artifacts_and_executions_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.add_context_artifacts_and_executions( - metadata_service.AddContextArtifactsAndExecutionsRequest(), - context='context_value', - artifacts=['artifacts_value'], - executions=['executions_value'], - ) - - -def test_add_context_children(transport: str = 'grpc', request_type=metadata_service.AddContextChildrenRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_context_children), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.AddContextChildrenResponse( - ) - response = client.add_context_children(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.AddContextChildrenRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, metadata_service.AddContextChildrenResponse) - - -def test_add_context_children_from_dict(): - test_add_context_children(request_type=dict) - - -def test_add_context_children_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_context_children), - '__call__') as call: - client.add_context_children() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.AddContextChildrenRequest() - - -@pytest.mark.asyncio -async def test_add_context_children_async(transport: str = 'grpc_asyncio', request_type=metadata_service.AddContextChildrenRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_context_children), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextChildrenResponse( - )) - response = await client.add_context_children(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.AddContextChildrenRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, metadata_service.AddContextChildrenResponse) - - -@pytest.mark.asyncio -async def test_add_context_children_async_from_dict(): - await test_add_context_children_async(request_type=dict) - - -def test_add_context_children_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.AddContextChildrenRequest() - - request.context = 'context/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_context_children), - '__call__') as call: - call.return_value = metadata_service.AddContextChildrenResponse() - client.add_context_children(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'context=context/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_add_context_children_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.AddContextChildrenRequest() - - request.context = 'context/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_context_children), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextChildrenResponse()) - await client.add_context_children(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'context=context/value', - ) in kw['metadata'] - - -def test_add_context_children_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_context_children), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.AddContextChildrenResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.add_context_children( - context='context_value', - child_contexts=['child_contexts_value'], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].context - mock_val = 'context_value' - assert arg == mock_val - arg = args[0].child_contexts - mock_val = ['child_contexts_value'] - assert arg == mock_val - - -def test_add_context_children_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.add_context_children( - metadata_service.AddContextChildrenRequest(), - context='context_value', - child_contexts=['child_contexts_value'], - ) - - -@pytest.mark.asyncio -async def test_add_context_children_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.add_context_children), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.AddContextChildrenResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextChildrenResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.add_context_children( - context='context_value', - child_contexts=['child_contexts_value'], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].context - mock_val = 'context_value' - assert arg == mock_val - arg = args[0].child_contexts - mock_val = ['child_contexts_value'] - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_add_context_children_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.add_context_children( - metadata_service.AddContextChildrenRequest(), - context='context_value', - child_contexts=['child_contexts_value'], - ) - - -def test_query_context_lineage_subgraph(transport: str = 'grpc', request_type=metadata_service.QueryContextLineageSubgraphRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.query_context_lineage_subgraph), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = lineage_subgraph.LineageSubgraph( - ) - response = client.query_context_lineage_subgraph(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.QueryContextLineageSubgraphRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, lineage_subgraph.LineageSubgraph) - - -def test_query_context_lineage_subgraph_from_dict(): - test_query_context_lineage_subgraph(request_type=dict) - - -def test_query_context_lineage_subgraph_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_context_lineage_subgraph), - '__call__') as call: - client.query_context_lineage_subgraph() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.QueryContextLineageSubgraphRequest() - - -@pytest.mark.asyncio -async def test_query_context_lineage_subgraph_async(transport: str = 'grpc_asyncio', request_type=metadata_service.QueryContextLineageSubgraphRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.query_context_lineage_subgraph), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph( - )) - response = await client.query_context_lineage_subgraph(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.QueryContextLineageSubgraphRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, lineage_subgraph.LineageSubgraph) - - -@pytest.mark.asyncio -async def test_query_context_lineage_subgraph_async_from_dict(): - await test_query_context_lineage_subgraph_async(request_type=dict) - - -def test_query_context_lineage_subgraph_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.QueryContextLineageSubgraphRequest() - - request.context = 'context/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_context_lineage_subgraph), - '__call__') as call: - call.return_value = lineage_subgraph.LineageSubgraph() - client.query_context_lineage_subgraph(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'context=context/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_query_context_lineage_subgraph_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.QueryContextLineageSubgraphRequest() - - request.context = 'context/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_context_lineage_subgraph), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph()) - await client.query_context_lineage_subgraph(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'context=context/value', - ) in kw['metadata'] - - -def test_query_context_lineage_subgraph_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_context_lineage_subgraph), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = lineage_subgraph.LineageSubgraph() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.query_context_lineage_subgraph( - context='context_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].context - mock_val = 'context_value' - assert arg == mock_val - - -def test_query_context_lineage_subgraph_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.query_context_lineage_subgraph( - metadata_service.QueryContextLineageSubgraphRequest(), - context='context_value', - ) - - -@pytest.mark.asyncio -async def test_query_context_lineage_subgraph_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_context_lineage_subgraph), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = lineage_subgraph.LineageSubgraph() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.query_context_lineage_subgraph( - context='context_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].context - mock_val = 'context_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_query_context_lineage_subgraph_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.query_context_lineage_subgraph( - metadata_service.QueryContextLineageSubgraphRequest(), - context='context_value', - ) - - -def test_create_execution(transport: str = 'grpc', request_type=metadata_service.CreateExecutionRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_execution), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_execution.Execution( - name='name_value', - display_name='display_name_value', - state=gca_execution.Execution.State.NEW, - etag='etag_value', - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - ) - response = client.create_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateExecutionRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_execution.Execution) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == gca_execution.Execution.State.NEW - assert response.etag == 'etag_value' - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -def test_create_execution_from_dict(): - test_create_execution(request_type=dict) - - -def test_create_execution_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_execution), - '__call__') as call: - client.create_execution() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateExecutionRequest() - - -@pytest.mark.asyncio -async def test_create_execution_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateExecutionRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_execution), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution( - name='name_value', - display_name='display_name_value', - state=gca_execution.Execution.State.NEW, - etag='etag_value', - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - )) - response = await client.create_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateExecutionRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_execution.Execution) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == gca_execution.Execution.State.NEW - assert response.etag == 'etag_value' - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_create_execution_async_from_dict(): - await test_create_execution_async(request_type=dict) - - -def test_create_execution_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.CreateExecutionRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_execution), - '__call__') as call: - call.return_value = gca_execution.Execution() - client.create_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_execution_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.CreateExecutionRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_execution), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution()) - await client.create_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_execution_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_execution), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_execution.Execution() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_execution( - parent='parent_value', - execution=gca_execution.Execution(name='name_value'), - execution_id='execution_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].execution - mock_val = gca_execution.Execution(name='name_value') - assert arg == mock_val - arg = args[0].execution_id - mock_val = 'execution_id_value' - assert arg == mock_val - - -def test_create_execution_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_execution( - metadata_service.CreateExecutionRequest(), - parent='parent_value', - execution=gca_execution.Execution(name='name_value'), - execution_id='execution_id_value', - ) - - -@pytest.mark.asyncio -async def test_create_execution_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_execution), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_execution.Execution() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_execution( - parent='parent_value', - execution=gca_execution.Execution(name='name_value'), - execution_id='execution_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].execution - mock_val = gca_execution.Execution(name='name_value') - assert arg == mock_val - arg = args[0].execution_id - mock_val = 'execution_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_execution_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_execution( - metadata_service.CreateExecutionRequest(), - parent='parent_value', - execution=gca_execution.Execution(name='name_value'), - execution_id='execution_id_value', - ) - - -def test_get_execution(transport: str = 'grpc', request_type=metadata_service.GetExecutionRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_execution), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = execution.Execution( - name='name_value', - display_name='display_name_value', - state=execution.Execution.State.NEW, - etag='etag_value', - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - ) - response = client.get_execution(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetExecutionRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, execution.Execution) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == execution.Execution.State.NEW - assert response.etag == 'etag_value' - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -def test_get_execution_from_dict(): - test_get_execution(request_type=dict) - - -def test_get_execution_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_execution), - '__call__') as call: - client.get_execution() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetExecutionRequest() - - -@pytest.mark.asyncio -async def test_get_execution_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetExecutionRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_execution), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(execution.Execution( - name='name_value', - display_name='display_name_value', - state=execution.Execution.State.NEW, - etag='etag_value', - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - )) - response = await client.get_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetExecutionRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, execution.Execution) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == execution.Execution.State.NEW - assert response.etag == 'etag_value' - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_get_execution_async_from_dict(): - await test_get_execution_async(request_type=dict) - - -def test_get_execution_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.GetExecutionRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_execution), - '__call__') as call: - call.return_value = execution.Execution() - client.get_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_execution_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.GetExecutionRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_execution), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(execution.Execution()) - await client.get_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_execution_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_execution), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = execution.Execution() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_execution( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_execution_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_execution( - metadata_service.GetExecutionRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_execution_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_execution), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = execution.Execution() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(execution.Execution()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_execution( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_execution_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_execution( - metadata_service.GetExecutionRequest(), - name='name_value', - ) - - -def test_list_executions(transport: str = 'grpc', request_type=metadata_service.ListExecutionsRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_executions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListExecutionsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_executions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListExecutionsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListExecutionsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_executions_from_dict(): - test_list_executions(request_type=dict) - - -def test_list_executions_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_executions), - '__call__') as call: - client.list_executions() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListExecutionsRequest() - - -@pytest.mark.asyncio -async def test_list_executions_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListExecutionsRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_executions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListExecutionsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_executions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListExecutionsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListExecutionsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_executions_async_from_dict(): - await test_list_executions_async(request_type=dict) - - -def test_list_executions_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = metadata_service.ListExecutionsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_executions), - '__call__') as call: - call.return_value = metadata_service.ListExecutionsResponse() - client.list_executions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_executions_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.ListExecutionsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_executions), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListExecutionsResponse()) - await client.list_executions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_executions_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_executions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListExecutionsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_executions( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_executions_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_executions( - metadata_service.ListExecutionsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_executions_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_executions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListExecutionsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListExecutionsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_executions( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_executions_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_executions( - metadata_service.ListExecutionsRequest(), - parent='parent_value', - ) - - -def test_list_executions_pager(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_executions), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - metadata_service.ListExecutionsResponse( - executions=[ - execution.Execution(), - execution.Execution(), - execution.Execution(), - ], - next_page_token='abc', - ), - metadata_service.ListExecutionsResponse( - executions=[], - next_page_token='def', - ), - metadata_service.ListExecutionsResponse( - executions=[ - execution.Execution(), - ], - next_page_token='ghi', - ), - metadata_service.ListExecutionsResponse( - executions=[ - execution.Execution(), - execution.Execution(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_executions(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, execution.Execution) - for i in results) - -def test_list_executions_pages(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_executions), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - metadata_service.ListExecutionsResponse( - executions=[ - execution.Execution(), - execution.Execution(), - execution.Execution(), - ], - next_page_token='abc', - ), - metadata_service.ListExecutionsResponse( - executions=[], - next_page_token='def', - ), - metadata_service.ListExecutionsResponse( - executions=[ - execution.Execution(), - ], - next_page_token='ghi', - ), - metadata_service.ListExecutionsResponse( - executions=[ - execution.Execution(), - execution.Execution(), - ], - ), - RuntimeError, - ) - pages = list(client.list_executions(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_executions_async_pager(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_executions), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - metadata_service.ListExecutionsResponse( - executions=[ - execution.Execution(), - execution.Execution(), - execution.Execution(), - ], - next_page_token='abc', - ), - metadata_service.ListExecutionsResponse( - executions=[], - next_page_token='def', - ), - metadata_service.ListExecutionsResponse( - executions=[ - execution.Execution(), - ], - next_page_token='ghi', - ), - metadata_service.ListExecutionsResponse( - executions=[ - execution.Execution(), - execution.Execution(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_executions(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, execution.Execution) - for i in responses) - -@pytest.mark.asyncio -async def test_list_executions_async_pages(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_executions), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - metadata_service.ListExecutionsResponse( - executions=[ - execution.Execution(), - execution.Execution(), - execution.Execution(), - ], - next_page_token='abc', - ), - metadata_service.ListExecutionsResponse( - executions=[], - next_page_token='def', - ), - metadata_service.ListExecutionsResponse( - executions=[ - execution.Execution(), - ], - next_page_token='ghi', - ), - metadata_service.ListExecutionsResponse( - executions=[ - execution.Execution(), - execution.Execution(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_executions(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_execution(transport: str = 'grpc', request_type=metadata_service.UpdateExecutionRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_execution), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_execution.Execution( - name='name_value', - display_name='display_name_value', - state=gca_execution.Execution.State.NEW, - etag='etag_value', - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - ) - response = client.update_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.UpdateExecutionRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_execution.Execution) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == gca_execution.Execution.State.NEW - assert response.etag == 'etag_value' - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -def test_update_execution_from_dict(): - test_update_execution(request_type=dict) - - -def test_update_execution_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_execution), - '__call__') as call: - client.update_execution() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.UpdateExecutionRequest() - - -@pytest.mark.asyncio -async def test_update_execution_async(transport: str = 'grpc_asyncio', request_type=metadata_service.UpdateExecutionRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_execution), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution( - name='name_value', - display_name='display_name_value', - state=gca_execution.Execution.State.NEW, - etag='etag_value', - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - )) - response = await client.update_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.UpdateExecutionRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_execution.Execution) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == gca_execution.Execution.State.NEW - assert response.etag == 'etag_value' - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_update_execution_async_from_dict(): - await test_update_execution_async(request_type=dict) - - -def test_update_execution_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.UpdateExecutionRequest() - - request.execution.name = 'execution.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_execution), - '__call__') as call: - call.return_value = gca_execution.Execution() - client.update_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'execution.name=execution.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_execution_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.UpdateExecutionRequest() - - request.execution.name = 'execution.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_execution), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution()) - await client.update_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'execution.name=execution.name/value', - ) in kw['metadata'] - - -def test_update_execution_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_execution), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_execution.Execution() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_execution( - execution=gca_execution.Execution(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].execution - mock_val = gca_execution.Execution(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_execution_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_execution( - metadata_service.UpdateExecutionRequest(), - execution=gca_execution.Execution(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_execution_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_execution), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_execution.Execution() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_execution( - execution=gca_execution.Execution(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].execution - mock_val = gca_execution.Execution(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_execution_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_execution( - metadata_service.UpdateExecutionRequest(), - execution=gca_execution.Execution(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_execution(transport: str = 'grpc', request_type=metadata_service.DeleteExecutionRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_execution), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteExecutionRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_delete_execution_from_dict(): - test_delete_execution(request_type=dict) - - -def test_delete_execution_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_execution), - '__call__') as call: - client.delete_execution() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteExecutionRequest() - - -@pytest.mark.asyncio -async def test_delete_execution_async(transport: str = 'grpc_asyncio', request_type=metadata_service.DeleteExecutionRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_execution), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteExecutionRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_execution_async_from_dict(): - await test_delete_execution_async(request_type=dict) - - -def test_delete_execution_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.DeleteExecutionRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_execution), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_execution_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.DeleteExecutionRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_execution), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_execution(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_execution_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_execution), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_execution( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_execution_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_execution( - metadata_service.DeleteExecutionRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_execution_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_execution), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_execution( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_execution_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_execution( - metadata_service.DeleteExecutionRequest(), - name='name_value', - ) - - -def test_purge_executions(transport: str = 'grpc', request_type=metadata_service.PurgeExecutionsRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_executions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.purge_executions(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.PurgeExecutionsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_purge_executions_from_dict(): - test_purge_executions(request_type=dict) - - -def test_purge_executions_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_executions), - '__call__') as call: - client.purge_executions() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.PurgeExecutionsRequest() - - -@pytest.mark.asyncio -async def test_purge_executions_async(transport: str = 'grpc_asyncio', request_type=metadata_service.PurgeExecutionsRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_executions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.purge_executions(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.PurgeExecutionsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_purge_executions_async_from_dict(): - await test_purge_executions_async(request_type=dict) - - -def test_purge_executions_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.PurgeExecutionsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_executions), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.purge_executions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_purge_executions_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.PurgeExecutionsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.purge_executions), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.purge_executions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_purge_executions_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_executions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.purge_executions( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_purge_executions_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.purge_executions( - metadata_service.PurgeExecutionsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_purge_executions_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_executions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.purge_executions( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_purge_executions_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.purge_executions( - metadata_service.PurgeExecutionsRequest(), - parent='parent_value', - ) - - -def test_add_execution_events(transport: str = 'grpc', request_type=metadata_service.AddExecutionEventsRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_execution_events), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.AddExecutionEventsResponse( - ) - response = client.add_execution_events(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.AddExecutionEventsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, metadata_service.AddExecutionEventsResponse) - - -def test_add_execution_events_from_dict(): - test_add_execution_events(request_type=dict) - - -def test_add_execution_events_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_execution_events), - '__call__') as call: - client.add_execution_events() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.AddExecutionEventsRequest() - - -@pytest.mark.asyncio -async def test_add_execution_events_async(transport: str = 'grpc_asyncio', request_type=metadata_service.AddExecutionEventsRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.add_execution_events), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddExecutionEventsResponse( - )) - response = await client.add_execution_events(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.AddExecutionEventsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, metadata_service.AddExecutionEventsResponse) - - -@pytest.mark.asyncio -async def test_add_execution_events_async_from_dict(): - await test_add_execution_events_async(request_type=dict) - - -def test_add_execution_events_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.AddExecutionEventsRequest() - - request.execution = 'execution/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_execution_events), - '__call__') as call: - call.return_value = metadata_service.AddExecutionEventsResponse() - client.add_execution_events(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'execution=execution/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_add_execution_events_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.AddExecutionEventsRequest() - - request.execution = 'execution/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_execution_events), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddExecutionEventsResponse()) - await client.add_execution_events(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'execution=execution/value', - ) in kw['metadata'] - - -def test_add_execution_events_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_execution_events), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.AddExecutionEventsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.add_execution_events( - execution='execution_value', - events=[event.Event(artifact='artifact_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].execution - mock_val = 'execution_value' - assert arg == mock_val - arg = args[0].events - mock_val = [event.Event(artifact='artifact_value')] - assert arg == mock_val - - -def test_add_execution_events_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.add_execution_events( - metadata_service.AddExecutionEventsRequest(), - execution='execution_value', - events=[event.Event(artifact='artifact_value')], - ) - - -@pytest.mark.asyncio -async def test_add_execution_events_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_execution_events), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.AddExecutionEventsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddExecutionEventsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.add_execution_events( - execution='execution_value', - events=[event.Event(artifact='artifact_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].execution - mock_val = 'execution_value' - assert arg == mock_val - arg = args[0].events - mock_val = [event.Event(artifact='artifact_value')] - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_add_execution_events_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.add_execution_events( - metadata_service.AddExecutionEventsRequest(), - execution='execution_value', - events=[event.Event(artifact='artifact_value')], - ) - - -def test_query_execution_inputs_and_outputs(transport: str = 'grpc', request_type=metadata_service.QueryExecutionInputsAndOutputsRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_execution_inputs_and_outputs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = lineage_subgraph.LineageSubgraph( - ) - response = client.query_execution_inputs_and_outputs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, lineage_subgraph.LineageSubgraph) - - -def test_query_execution_inputs_and_outputs_from_dict(): - test_query_execution_inputs_and_outputs(request_type=dict) - - -def test_query_execution_inputs_and_outputs_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_execution_inputs_and_outputs), - '__call__') as call: - client.query_execution_inputs_and_outputs() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest() - - -@pytest.mark.asyncio -async def test_query_execution_inputs_and_outputs_async(transport: str = 'grpc_asyncio', request_type=metadata_service.QueryExecutionInputsAndOutputsRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_execution_inputs_and_outputs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph( - )) - response = await client.query_execution_inputs_and_outputs(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, lineage_subgraph.LineageSubgraph) - - -@pytest.mark.asyncio -async def test_query_execution_inputs_and_outputs_async_from_dict(): - await test_query_execution_inputs_and_outputs_async(request_type=dict) - - -def test_query_execution_inputs_and_outputs_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.QueryExecutionInputsAndOutputsRequest() - - request.execution = 'execution/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_execution_inputs_and_outputs), - '__call__') as call: - call.return_value = lineage_subgraph.LineageSubgraph() - client.query_execution_inputs_and_outputs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'execution=execution/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_query_execution_inputs_and_outputs_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.QueryExecutionInputsAndOutputsRequest() - - request.execution = 'execution/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.query_execution_inputs_and_outputs), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph()) - await client.query_execution_inputs_and_outputs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'execution=execution/value', - ) in kw['metadata'] - - -def test_query_execution_inputs_and_outputs_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_execution_inputs_and_outputs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = lineage_subgraph.LineageSubgraph() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.query_execution_inputs_and_outputs( - execution='execution_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].execution - mock_val = 'execution_value' - assert arg == mock_val - - -def test_query_execution_inputs_and_outputs_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.query_execution_inputs_and_outputs( - metadata_service.QueryExecutionInputsAndOutputsRequest(), - execution='execution_value', - ) - - -@pytest.mark.asyncio -async def test_query_execution_inputs_and_outputs_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_execution_inputs_and_outputs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = lineage_subgraph.LineageSubgraph() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.query_execution_inputs_and_outputs( - execution='execution_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].execution - mock_val = 'execution_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_query_execution_inputs_and_outputs_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.query_execution_inputs_and_outputs( - metadata_service.QueryExecutionInputsAndOutputsRequest(), - execution='execution_value', - ) - - -def test_create_metadata_schema(transport: str = 'grpc', request_type=metadata_service.CreateMetadataSchemaRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_schema), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_metadata_schema.MetadataSchema( - name='name_value', - schema_version='schema_version_value', - schema='schema_value', - schema_type=gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE, - description='description_value', - ) - response = client.create_metadata_schema(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateMetadataSchemaRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_metadata_schema.MetadataSchema) - assert response.name == 'name_value' - assert response.schema_version == 'schema_version_value' - assert response.schema == 'schema_value' - assert response.schema_type == gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE - assert response.description == 'description_value' - - -def test_create_metadata_schema_from_dict(): - test_create_metadata_schema(request_type=dict) - - -def test_create_metadata_schema_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_schema), - '__call__') as call: - client.create_metadata_schema() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateMetadataSchemaRequest() - - -@pytest.mark.asyncio -async def test_create_metadata_schema_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateMetadataSchemaRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_schema), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_metadata_schema.MetadataSchema( - name='name_value', - schema_version='schema_version_value', - schema='schema_value', - schema_type=gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE, - description='description_value', - )) - response = await client.create_metadata_schema(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateMetadataSchemaRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_metadata_schema.MetadataSchema) - assert response.name == 'name_value' - assert response.schema_version == 'schema_version_value' - assert response.schema == 'schema_value' - assert response.schema_type == gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_create_metadata_schema_async_from_dict(): - await test_create_metadata_schema_async(request_type=dict) - - -def test_create_metadata_schema_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.CreateMetadataSchemaRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_schema), - '__call__') as call: - call.return_value = gca_metadata_schema.MetadataSchema() - client.create_metadata_schema(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_metadata_schema_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.CreateMetadataSchemaRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_metadata_schema), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_metadata_schema.MetadataSchema()) - await client.create_metadata_schema(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_metadata_schema_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_schema), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_metadata_schema.MetadataSchema() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_metadata_schema( - parent='parent_value', - metadata_schema=gca_metadata_schema.MetadataSchema(name='name_value'), - metadata_schema_id='metadata_schema_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].metadata_schema - mock_val = gca_metadata_schema.MetadataSchema(name='name_value') - assert arg == mock_val - arg = args[0].metadata_schema_id - mock_val = 'metadata_schema_id_value' - assert arg == mock_val - - -def test_create_metadata_schema_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_metadata_schema( - metadata_service.CreateMetadataSchemaRequest(), - parent='parent_value', - metadata_schema=gca_metadata_schema.MetadataSchema(name='name_value'), - metadata_schema_id='metadata_schema_id_value', - ) - - -@pytest.mark.asyncio -async def test_create_metadata_schema_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_schema), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_metadata_schema.MetadataSchema() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_metadata_schema.MetadataSchema()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_metadata_schema( - parent='parent_value', - metadata_schema=gca_metadata_schema.MetadataSchema(name='name_value'), - metadata_schema_id='metadata_schema_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].metadata_schema - mock_val = gca_metadata_schema.MetadataSchema(name='name_value') - assert arg == mock_val - arg = args[0].metadata_schema_id - mock_val = 'metadata_schema_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_metadata_schema_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_metadata_schema( - metadata_service.CreateMetadataSchemaRequest(), - parent='parent_value', - metadata_schema=gca_metadata_schema.MetadataSchema(name='name_value'), - metadata_schema_id='metadata_schema_id_value', - ) - - -def test_get_metadata_schema(transport: str = 'grpc', request_type=metadata_service.GetMetadataSchemaRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_schema), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_schema.MetadataSchema( - name='name_value', - schema_version='schema_version_value', - schema='schema_value', - schema_type=metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE, - description='description_value', - ) - response = client.get_metadata_schema(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetMetadataSchemaRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, metadata_schema.MetadataSchema) - assert response.name == 'name_value' - assert response.schema_version == 'schema_version_value' - assert response.schema == 'schema_value' - assert response.schema_type == metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE - assert response.description == 'description_value' - - -def test_get_metadata_schema_from_dict(): - test_get_metadata_schema(request_type=dict) - - -def test_get_metadata_schema_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_schema), - '__call__') as call: - client.get_metadata_schema() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetMetadataSchemaRequest() - - -@pytest.mark.asyncio -async def test_get_metadata_schema_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetMetadataSchemaRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_schema), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_schema.MetadataSchema( - name='name_value', - schema_version='schema_version_value', - schema='schema_value', - schema_type=metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE, - description='description_value', - )) - response = await client.get_metadata_schema(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetMetadataSchemaRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, metadata_schema.MetadataSchema) - assert response.name == 'name_value' - assert response.schema_version == 'schema_version_value' - assert response.schema == 'schema_value' - assert response.schema_type == metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_get_metadata_schema_async_from_dict(): - await test_get_metadata_schema_async(request_type=dict) - - -def test_get_metadata_schema_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.GetMetadataSchemaRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_schema), - '__call__') as call: - call.return_value = metadata_schema.MetadataSchema() - client.get_metadata_schema(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_metadata_schema_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.GetMetadataSchemaRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_schema), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_schema.MetadataSchema()) - await client.get_metadata_schema(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_metadata_schema_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_schema), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_schema.MetadataSchema() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_metadata_schema( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_metadata_schema_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_metadata_schema( - metadata_service.GetMetadataSchemaRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_metadata_schema_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_schema), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_schema.MetadataSchema() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_schema.MetadataSchema()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_metadata_schema( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_metadata_schema_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_metadata_schema( - metadata_service.GetMetadataSchemaRequest(), - name='name_value', - ) - - -def test_list_metadata_schemas(transport: str = 'grpc', request_type=metadata_service.ListMetadataSchemasRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_schemas), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListMetadataSchemasResponse( - next_page_token='next_page_token_value', - ) - response = client.list_metadata_schemas(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListMetadataSchemasRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListMetadataSchemasPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_metadata_schemas_from_dict(): - test_list_metadata_schemas(request_type=dict) - - -def test_list_metadata_schemas_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_metadata_schemas), - '__call__') as call: - client.list_metadata_schemas() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListMetadataSchemasRequest() - - -@pytest.mark.asyncio -async def test_list_metadata_schemas_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListMetadataSchemasRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_schemas), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataSchemasResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_metadata_schemas(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListMetadataSchemasRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListMetadataSchemasAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_metadata_schemas_async_from_dict(): - await test_list_metadata_schemas_async(request_type=dict) - - -def test_list_metadata_schemas_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = metadata_service.ListMetadataSchemasRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_schemas), - '__call__') as call: - call.return_value = metadata_service.ListMetadataSchemasResponse() - client.list_metadata_schemas(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_metadata_schemas_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.ListMetadataSchemasRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_schemas), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataSchemasResponse()) - await client.list_metadata_schemas(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_metadata_schemas_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_metadata_schemas), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListMetadataSchemasResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_metadata_schemas( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_metadata_schemas_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_metadata_schemas( - metadata_service.ListMetadataSchemasRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_metadata_schemas_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_schemas), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListMetadataSchemasResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataSchemasResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_metadata_schemas( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_metadata_schemas_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_metadata_schemas( - metadata_service.ListMetadataSchemasRequest(), - parent='parent_value', - ) - - -def test_list_metadata_schemas_pager(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_schemas), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[ - metadata_schema.MetadataSchema(), - metadata_schema.MetadataSchema(), - metadata_schema.MetadataSchema(), - ], - next_page_token='abc', - ), - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[], - next_page_token='def', - ), - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[ - metadata_schema.MetadataSchema(), - ], - next_page_token='ghi', - ), - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[ - metadata_schema.MetadataSchema(), - metadata_schema.MetadataSchema(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_metadata_schemas(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, metadata_schema.MetadataSchema) - for i in results) - -def test_list_metadata_schemas_pages(): - client = MetadataServiceClient( - 
credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_schemas), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[ - metadata_schema.MetadataSchema(), - metadata_schema.MetadataSchema(), - metadata_schema.MetadataSchema(), - ], - next_page_token='abc', - ), - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[], - next_page_token='def', - ), - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[ - metadata_schema.MetadataSchema(), - ], - next_page_token='ghi', - ), - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[ - metadata_schema.MetadataSchema(), - metadata_schema.MetadataSchema(), - ], - ), - RuntimeError, - ) - pages = list(client.list_metadata_schemas(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_metadata_schemas_async_pager(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_schemas), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[ - metadata_schema.MetadataSchema(), - metadata_schema.MetadataSchema(), - metadata_schema.MetadataSchema(), - ], - next_page_token='abc', - ), - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[], - next_page_token='def', - ), - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[ - metadata_schema.MetadataSchema(), - ], - next_page_token='ghi', - ), - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[ - metadata_schema.MetadataSchema(), - metadata_schema.MetadataSchema(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_metadata_schemas(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, metadata_schema.MetadataSchema) - for i in responses) - -@pytest.mark.asyncio -async def test_list_metadata_schemas_async_pages(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_schemas), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[ - metadata_schema.MetadataSchema(), - metadata_schema.MetadataSchema(), - metadata_schema.MetadataSchema(), - ], - next_page_token='abc', - ), - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[], - next_page_token='def', - ), - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[ - metadata_schema.MetadataSchema(), - ], - next_page_token='ghi', - ), - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[ - metadata_schema.MetadataSchema(), - metadata_schema.MetadataSchema(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_metadata_schemas(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_query_artifact_lineage_subgraph(transport: str = 'grpc', request_type=metadata_service.QueryArtifactLineageSubgraphRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_artifact_lineage_subgraph), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = lineage_subgraph.LineageSubgraph( - ) - response = client.query_artifact_lineage_subgraph(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, lineage_subgraph.LineageSubgraph) - - -def test_query_artifact_lineage_subgraph_from_dict(): - test_query_artifact_lineage_subgraph(request_type=dict) - - -def test_query_artifact_lineage_subgraph_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_artifact_lineage_subgraph), - '__call__') as call: - client.query_artifact_lineage_subgraph() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest() - - -@pytest.mark.asyncio -async def test_query_artifact_lineage_subgraph_async(transport: str = 'grpc_asyncio', request_type=metadata_service.QueryArtifactLineageSubgraphRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_artifact_lineage_subgraph), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph( - )) - response = await client.query_artifact_lineage_subgraph(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, lineage_subgraph.LineageSubgraph) - - -@pytest.mark.asyncio -async def test_query_artifact_lineage_subgraph_async_from_dict(): - await test_query_artifact_lineage_subgraph_async(request_type=dict) - - -def test_query_artifact_lineage_subgraph_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.QueryArtifactLineageSubgraphRequest() - - request.artifact = 'artifact/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_artifact_lineage_subgraph), - '__call__') as call: - call.return_value = lineage_subgraph.LineageSubgraph() - client.query_artifact_lineage_subgraph(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'artifact=artifact/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_query_artifact_lineage_subgraph_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.QueryArtifactLineageSubgraphRequest() - - request.artifact = 'artifact/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.query_artifact_lineage_subgraph), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph()) - await client.query_artifact_lineage_subgraph(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'artifact=artifact/value', - ) in kw['metadata'] - - -def test_query_artifact_lineage_subgraph_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_artifact_lineage_subgraph), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = lineage_subgraph.LineageSubgraph() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.query_artifact_lineage_subgraph( - artifact='artifact_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].artifact - mock_val = 'artifact_value' - assert arg == mock_val - - -def test_query_artifact_lineage_subgraph_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.query_artifact_lineage_subgraph( - metadata_service.QueryArtifactLineageSubgraphRequest(), - artifact='artifact_value', - ) - - -@pytest.mark.asyncio -async def test_query_artifact_lineage_subgraph_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_artifact_lineage_subgraph), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = lineage_subgraph.LineageSubgraph() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.query_artifact_lineage_subgraph( - artifact='artifact_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].artifact - mock_val = 'artifact_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_query_artifact_lineage_subgraph_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.query_artifact_lineage_subgraph( - metadata_service.QueryArtifactLineageSubgraphRequest(), - artifact='artifact_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. 
- transport = transports.MetadataServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.MetadataServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = MetadataServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.MetadataServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = MetadataServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.MetadataServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = MetadataServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.MetadataServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.MetadataServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.MetadataServiceGrpcTransport, - transports.MetadataServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. 
- with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.MetadataServiceGrpcTransport, - ) - -def test_metadata_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.MetadataServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_metadata_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1.services.metadata_service.transports.MetadataServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.MetadataServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
- methods = ( - 'create_metadata_store', - 'get_metadata_store', - 'list_metadata_stores', - 'delete_metadata_store', - 'create_artifact', - 'get_artifact', - 'list_artifacts', - 'update_artifact', - 'delete_artifact', - 'purge_artifacts', - 'create_context', - 'get_context', - 'list_contexts', - 'update_context', - 'delete_context', - 'purge_contexts', - 'add_context_artifacts_and_executions', - 'add_context_children', - 'query_context_lineage_subgraph', - 'create_execution', - 'get_execution', - 'list_executions', - 'update_execution', - 'delete_execution', - 'purge_executions', - 'add_execution_events', - 'query_execution_inputs_and_outputs', - 'create_metadata_schema', - 'get_metadata_schema', - 'list_metadata_schemas', - 'query_artifact_lineage_subgraph', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -def test_metadata_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.metadata_service.transports.MetadataServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.MetadataServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_metadata_service_base_transport_with_adc(): - # Test the default credentials are used if 
credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.metadata_service.transports.MetadataServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.MetadataServiceTransport() - adc.assert_called_once() - - -def test_metadata_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - MetadataServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.MetadataServiceGrpcTransport, - transports.MetadataServiceGrpcAsyncIOTransport, - ], -) -def test_metadata_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.MetadataServiceGrpcTransport, grpc_helpers), - (transports.MetadataServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_metadata_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.MetadataServiceGrpcTransport, transports.MetadataServiceGrpcAsyncIOTransport]) -def test_metadata_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. 
- with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_metadata_service_host_no_port(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_metadata_service_host_with_port(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_metadata_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.MetadataServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_metadata_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.MetadataServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.MetadataServiceGrpcTransport, transports.MetadataServiceGrpcAsyncIOTransport]) -def test_metadata_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from 
grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.MetadataServiceGrpcTransport, transports.MetadataServiceGrpcAsyncIOTransport]) -def test_metadata_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_metadata_service_grpc_lro_client(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_metadata_service_grpc_lro_async_client(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_artifact_path(): - project = "squid" - location = "clam" - metadata_store = "whelk" - artifact = "octopus" - expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, ) - actual = MetadataServiceClient.artifact_path(project, location, metadata_store, artifact) - assert expected == actual - - -def test_parse_artifact_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - "metadata_store": "cuttlefish", - "artifact": "mussel", - } - path = MetadataServiceClient.artifact_path(**expected) - - # Check that the path construction is reversible. - actual = MetadataServiceClient.parse_artifact_path(path) - assert expected == actual - -def test_context_path(): - project = "winkle" - location = "nautilus" - metadata_store = "scallop" - context = "abalone" - expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, ) - actual = MetadataServiceClient.context_path(project, location, metadata_store, context) - assert expected == actual - - -def test_parse_context_path(): - expected = { - "project": "squid", - "location": "clam", - "metadata_store": "whelk", - "context": "octopus", - } - path = MetadataServiceClient.context_path(**expected) - - # Check that the path construction is reversible. 
- actual = MetadataServiceClient.parse_context_path(path) - assert expected == actual - -def test_execution_path(): - project = "oyster" - location = "nudibranch" - metadata_store = "cuttlefish" - execution = "mussel" - expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, location=location, metadata_store=metadata_store, execution=execution, ) - actual = MetadataServiceClient.execution_path(project, location, metadata_store, execution) - assert expected == actual - - -def test_parse_execution_path(): - expected = { - "project": "winkle", - "location": "nautilus", - "metadata_store": "scallop", - "execution": "abalone", - } - path = MetadataServiceClient.execution_path(**expected) - - # Check that the path construction is reversible. - actual = MetadataServiceClient.parse_execution_path(path) - assert expected == actual - -def test_metadata_schema_path(): - project = "squid" - location = "clam" - metadata_store = "whelk" - metadata_schema = "octopus" - expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}".format(project=project, location=location, metadata_store=metadata_store, metadata_schema=metadata_schema, ) - actual = MetadataServiceClient.metadata_schema_path(project, location, metadata_store, metadata_schema) - assert expected == actual - - -def test_parse_metadata_schema_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - "metadata_store": "cuttlefish", - "metadata_schema": "mussel", - } - path = MetadataServiceClient.metadata_schema_path(**expected) - - # Check that the path construction is reversible. 
- actual = MetadataServiceClient.parse_metadata_schema_path(path) - assert expected == actual - -def test_metadata_store_path(): - project = "winkle" - location = "nautilus" - metadata_store = "scallop" - expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}".format(project=project, location=location, metadata_store=metadata_store, ) - actual = MetadataServiceClient.metadata_store_path(project, location, metadata_store) - assert expected == actual - - -def test_parse_metadata_store_path(): - expected = { - "project": "abalone", - "location": "squid", - "metadata_store": "clam", - } - path = MetadataServiceClient.metadata_store_path(**expected) - - # Check that the path construction is reversible. - actual = MetadataServiceClient.parse_metadata_store_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "whelk" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = MetadataServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "octopus", - } - path = MetadataServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = MetadataServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "oyster" - expected = "folders/{folder}".format(folder=folder, ) - actual = MetadataServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "nudibranch", - } - path = MetadataServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. 
- actual = MetadataServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "cuttlefish" - expected = "organizations/{organization}".format(organization=organization, ) - actual = MetadataServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "mussel", - } - path = MetadataServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = MetadataServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "winkle" - expected = "projects/{project}".format(project=project, ) - actual = MetadataServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "nautilus", - } - path = MetadataServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = MetadataServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "scallop" - location = "abalone" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = MetadataServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "squid", - "location": "clam", - } - path = MetadataServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = MetadataServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.MetadataServiceTransport, '_prep_wrapped_messages') as prep: - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.MetadataServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = MetadataServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_migration_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_migration_service.py deleted file mode 100644 index bd5e00825f..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_migration_service.py +++ /dev/null @@ -1,1748 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1.services.migration_service import MigrationServiceAsyncClient -from google.cloud.aiplatform_v1.services.migration_service import MigrationServiceClient -from google.cloud.aiplatform_v1.services.migration_service import pagers -from google.cloud.aiplatform_v1.services.migration_service import transports -from google.cloud.aiplatform_v1.types import migratable_resource -from google.cloud.aiplatform_v1.types import migration_service -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert MigrationServiceClient._get_default_mtls_endpoint(None) is None - assert MigrationServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert MigrationServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert MigrationServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert MigrationServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert MigrationServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - MigrationServiceClient, - MigrationServiceAsyncClient, -]) -def test_migration_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.MigrationServiceGrpcTransport, "grpc"), - (transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_migration_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as 
use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - MigrationServiceClient, - MigrationServiceAsyncClient, -]) -def test_migration_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_migration_service_client_get_transport_class(): - transport = MigrationServiceClient.get_transport_class() - available_transports = [ - transports.MigrationServiceGrpcTransport, - ] - assert transport in available_transports - - transport = MigrationServiceClient.get_transport_class("grpc") - assert transport == transports.MigrationServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"), - (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(MigrationServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceClient)) 
-@mock.patch.object(MigrationServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceAsyncClient)) -def test_migration_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(MigrationServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(MigrationServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc", "true"), - (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc", "false"), - (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(MigrationServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceClient)) -@mock.patch.object(MigrationServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_migration_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"), - (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_migration_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"), - (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_migration_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_migration_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1.services.migration_service.transports.MigrationServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = MigrationServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_search_migratable_resources(transport: str = 'grpc', request_type=migration_service.SearchMigratableResourcesRequest): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = migration_service.SearchMigratableResourcesResponse( - next_page_token='next_page_token_value', - ) - response = client.search_migratable_resources(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == migration_service.SearchMigratableResourcesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.SearchMigratableResourcesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_search_migratable_resources_from_dict(): - test_search_migratable_resources(request_type=dict) - - -def test_search_migratable_resources_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - client.search_migratable_resources() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == migration_service.SearchMigratableResourcesRequest() - - -@pytest.mark.asyncio -async def test_search_migratable_resources_async(transport: str = 'grpc_asyncio', request_type=migration_service.SearchMigratableResourcesRequest): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(migration_service.SearchMigratableResourcesResponse( - next_page_token='next_page_token_value', - )) - response = await client.search_migratable_resources(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == migration_service.SearchMigratableResourcesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.SearchMigratableResourcesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_search_migratable_resources_async_from_dict(): - await test_search_migratable_resources_async(request_type=dict) - - -def test_search_migratable_resources_field_headers(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = migration_service.SearchMigratableResourcesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - call.return_value = migration_service.SearchMigratableResourcesResponse() - client.search_migratable_resources(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_search_migratable_resources_field_headers_async(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = migration_service.SearchMigratableResourcesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_service.SearchMigratableResourcesResponse()) - await client.search_migratable_resources(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_search_migratable_resources_flattened(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = migration_service.SearchMigratableResourcesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.search_migratable_resources( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_search_migratable_resources_flattened_error(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.search_migratable_resources( - migration_service.SearchMigratableResourcesRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_search_migratable_resources_flattened_async(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = migration_service.SearchMigratableResourcesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_service.SearchMigratableResourcesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.search_migratable_resources( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_search_migratable_resources_flattened_error_async(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.search_migratable_resources( - migration_service.SearchMigratableResourcesRequest(), - parent='parent_value', - ) - - -def test_search_migratable_resources_pager(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - ], - next_page_token='abc', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[], - next_page_token='def', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - ], - next_page_token='ghi', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.search_migratable_resources(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, migratable_resource.MigratableResource) - for i in results) - -def test_search_migratable_resources_pages(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - ], - next_page_token='abc', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[], - next_page_token='def', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - ], - next_page_token='ghi', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - ], - ), - RuntimeError, - ) - pages = list(client.search_migratable_resources(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_search_migratable_resources_async_pager(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - ], - next_page_token='abc', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[], - next_page_token='def', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - ], - next_page_token='ghi', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - ], - ), - RuntimeError, - ) - async_pager = await client.search_migratable_resources(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, migratable_resource.MigratableResource) - for i in responses) - -@pytest.mark.asyncio -async def test_search_migratable_resources_async_pages(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - ], - next_page_token='abc', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[], - next_page_token='def', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - ], - next_page_token='ghi', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.search_migratable_resources(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_batch_migrate_resources(transport: str = 'grpc', request_type=migration_service.BatchMigrateResourcesRequest): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_migrate_resources), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.batch_migrate_resources(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == migration_service.BatchMigrateResourcesRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_batch_migrate_resources_from_dict(): - test_batch_migrate_resources(request_type=dict) - - -def test_batch_migrate_resources_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_migrate_resources), - '__call__') as call: - client.batch_migrate_resources() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == migration_service.BatchMigrateResourcesRequest() - - -@pytest.mark.asyncio -async def test_batch_migrate_resources_async(transport: str = 'grpc_asyncio', request_type=migration_service.BatchMigrateResourcesRequest): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_migrate_resources), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.batch_migrate_resources(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == migration_service.BatchMigrateResourcesRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_batch_migrate_resources_async_from_dict(): - await test_batch_migrate_resources_async(request_type=dict) - - -def test_batch_migrate_resources_field_headers(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = migration_service.BatchMigrateResourcesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_migrate_resources), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.batch_migrate_resources(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_batch_migrate_resources_field_headers_async(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = migration_service.BatchMigrateResourcesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_migrate_resources), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.batch_migrate_resources(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_batch_migrate_resources_flattened(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_migrate_resources), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.batch_migrate_resources( - parent='parent_value', - migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].migrate_resource_requests - mock_val = [migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))] - assert arg == mock_val - - -def test_batch_migrate_resources_flattened_error(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.batch_migrate_resources( - migration_service.BatchMigrateResourcesRequest(), - parent='parent_value', - migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))], - ) - - -@pytest.mark.asyncio -async def test_batch_migrate_resources_flattened_async(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_migrate_resources), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.batch_migrate_resources( - parent='parent_value', - migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].migrate_resource_requests - mock_val = [migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))] - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_batch_migrate_resources_flattened_error_async(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.batch_migrate_resources( - migration_service.BatchMigrateResourcesRequest(), - parent='parent_value', - migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))], - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.MigrationServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.MigrationServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = MigrationServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. 
- transport = transports.MigrationServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = MigrationServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.MigrationServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = MigrationServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.MigrationServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.MigrationServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.MigrationServiceGrpcTransport, - transports.MigrationServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. 
- client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.MigrationServiceGrpcTransport, - ) - -def test_migration_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.MigrationServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_migration_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1.services.migration_service.transports.MigrationServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.MigrationServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'search_migratable_resources', - 'batch_migrate_resources', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -def test_migration_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.MigrationServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) 
- load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_migration_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.MigrationServiceTransport() - adc.assert_called_once() - - -def test_migration_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - MigrationServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.MigrationServiceGrpcTransport, - transports.MigrationServiceGrpcAsyncIOTransport, - ], -) -def test_migration_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.MigrationServiceGrpcTransport, grpc_helpers), - (transports.MigrationServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_migration_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.MigrationServiceGrpcTransport, transports.MigrationServiceGrpcAsyncIOTransport]) -def test_migration_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
- with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_migration_service_host_no_port(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_migration_service_host_with_port(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_migration_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.MigrationServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_migration_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.MigrationServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.MigrationServiceGrpcTransport, transports.MigrationServiceGrpcAsyncIOTransport]) -def test_migration_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - 
"mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.MigrationServiceGrpcTransport, transports.MigrationServiceGrpcAsyncIOTransport]) -def test_migration_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_migration_service_grpc_lro_client(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_migration_service_grpc_lro_async_client(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_annotated_dataset_path(): - project = "squid" - dataset = "clam" - annotated_dataset = "whelk" - expected = "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}".format(project=project, dataset=dataset, annotated_dataset=annotated_dataset, ) - actual = MigrationServiceClient.annotated_dataset_path(project, dataset, annotated_dataset) - assert expected == actual - - -def test_parse_annotated_dataset_path(): - expected = { - "project": "octopus", - "dataset": "oyster", - "annotated_dataset": "nudibranch", - } - path = MigrationServiceClient.annotated_dataset_path(**expected) - - # Check that the path construction is reversible. 
- actual = MigrationServiceClient.parse_annotated_dataset_path(path) - assert expected == actual - -def test_dataset_path(): - project = "cuttlefish" - location = "mussel" - dataset = "winkle" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) - assert expected == actual - - -def test_parse_dataset_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "dataset": "abalone", - } - path = MigrationServiceClient.dataset_path(**expected) - - # Check that the path construction is reversible. - actual = MigrationServiceClient.parse_dataset_path(path) - assert expected == actual - -def test_dataset_path(): - project = "squid" - dataset = "clam" - expected = "projects/{project}/datasets/{dataset}".format(project=project, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) - assert expected == actual - - -def test_parse_dataset_path(): - expected = { - "project": "whelk", - "dataset": "octopus", - } - path = MigrationServiceClient.dataset_path(**expected) - - # Check that the path construction is reversible. - actual = MigrationServiceClient.parse_dataset_path(path) - assert expected == actual - -def test_dataset_path(): - project = "oyster" - location = "nudibranch" - dataset = "cuttlefish" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) - assert expected == actual - - -def test_parse_dataset_path(): - expected = { - "project": "mussel", - "location": "winkle", - "dataset": "nautilus", - } - path = MigrationServiceClient.dataset_path(**expected) - - # Check that the path construction is reversible. 
- actual = MigrationServiceClient.parse_dataset_path(path) - assert expected == actual - -def test_model_path(): - project = "scallop" - location = "abalone" - model = "squid" - expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - actual = MigrationServiceClient.model_path(project, location, model) - assert expected == actual - - -def test_parse_model_path(): - expected = { - "project": "clam", - "location": "whelk", - "model": "octopus", - } - path = MigrationServiceClient.model_path(**expected) - - # Check that the path construction is reversible. - actual = MigrationServiceClient.parse_model_path(path) - assert expected == actual - -def test_model_path(): - project = "oyster" - location = "nudibranch" - model = "cuttlefish" - expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - actual = MigrationServiceClient.model_path(project, location, model) - assert expected == actual - - -def test_parse_model_path(): - expected = { - "project": "mussel", - "location": "winkle", - "model": "nautilus", - } - path = MigrationServiceClient.model_path(**expected) - - # Check that the path construction is reversible. - actual = MigrationServiceClient.parse_model_path(path) - assert expected == actual - -def test_version_path(): - project = "scallop" - model = "abalone" - version = "squid" - expected = "projects/{project}/models/{model}/versions/{version}".format(project=project, model=model, version=version, ) - actual = MigrationServiceClient.version_path(project, model, version) - assert expected == actual - - -def test_parse_version_path(): - expected = { - "project": "clam", - "model": "whelk", - "version": "octopus", - } - path = MigrationServiceClient.version_path(**expected) - - # Check that the path construction is reversible. 
- actual = MigrationServiceClient.parse_version_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "oyster" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = MigrationServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "nudibranch", - } - path = MigrationServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = MigrationServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "cuttlefish" - expected = "folders/{folder}".format(folder=folder, ) - actual = MigrationServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "mussel", - } - path = MigrationServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = MigrationServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "winkle" - expected = "organizations/{organization}".format(organization=organization, ) - actual = MigrationServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nautilus", - } - path = MigrationServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. 
- actual = MigrationServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "scallop" - expected = "projects/{project}".format(project=project, ) - actual = MigrationServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "abalone", - } - path = MigrationServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = MigrationServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "squid" - location = "clam" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = MigrationServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "whelk", - "location": "octopus", - } - path = MigrationServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = MigrationServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.MigrationServiceTransport, '_prep_wrapped_messages') as prep: - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.MigrationServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = MigrationServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_model_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_model_service.py deleted file mode 100644 index 83966af9c1..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_model_service.py +++ /dev/null @@ -1,4088 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1.services.model_service import ModelServiceAsyncClient -from google.cloud.aiplatform_v1.services.model_service import ModelServiceClient -from google.cloud.aiplatform_v1.services.model_service import pagers -from google.cloud.aiplatform_v1.services.model_service import transports -from google.cloud.aiplatform_v1.types import deployed_model_ref -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import env_var -from google.cloud.aiplatform_v1.types import explanation -from google.cloud.aiplatform_v1.types import explanation_metadata -from google.cloud.aiplatform_v1.types import io -from google.cloud.aiplatform_v1.types import model -from google.cloud.aiplatform_v1.types import model as gca_model -from google.cloud.aiplatform_v1.types import model_evaluation -from google.cloud.aiplatform_v1.types import model_evaluation_slice -from google.cloud.aiplatform_v1.types import model_service -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 
# type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert ModelServiceClient._get_default_mtls_endpoint(None) is None - assert ModelServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert ModelServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert ModelServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert ModelServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert ModelServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - ModelServiceClient, - ModelServiceAsyncClient, -]) -def test_model_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.ModelServiceGrpcTransport, 
"grpc"), - (transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_model_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - ModelServiceClient, - ModelServiceAsyncClient, -]) -def test_model_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_model_service_client_get_transport_class(): - transport = ModelServiceClient.get_transport_class() - available_transports = [ - transports.ModelServiceGrpcTransport, - ] - assert transport in available_transports - - transport = ModelServiceClient.get_transport_class("grpc") - assert transport == transports.ModelServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc"), - 
(ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(ModelServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceClient)) -@mock.patch.object(ModelServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceAsyncClient)) -def test_model_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(ModelServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(ModelServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc", "true"), - (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc", "false"), - (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(ModelServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceClient)) -@mock.patch.object(ModelServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_model_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc"), - (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_model_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc"), - (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_model_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_model_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1.services.model_service.transports.ModelServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = ModelServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_upload_model(transport: str = 'grpc', request_type=model_service.UploadModelRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.upload_model(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.UploadModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_upload_model_from_dict(): - test_upload_model(request_type=dict) - - -def test_upload_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - client.upload_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.UploadModelRequest() - - -@pytest.mark.asyncio -async def test_upload_model_async(transport: str = 'grpc_asyncio', request_type=model_service.UploadModelRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.upload_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.UploadModelRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_upload_model_async_from_dict(): - await test_upload_model_async(request_type=dict) - - -def test_upload_model_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.UploadModelRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.upload_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_upload_model_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.UploadModelRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.upload_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_upload_model_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.upload_model( - parent='parent_value', - model=gca_model.Model(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].model - mock_val = gca_model.Model(name='name_value') - assert arg == mock_val - - -def test_upload_model_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.upload_model( - model_service.UploadModelRequest(), - parent='parent_value', - model=gca_model.Model(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_upload_model_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.upload_model( - parent='parent_value', - model=gca_model.Model(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].model - mock_val = gca_model.Model(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_upload_model_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.upload_model( - model_service.UploadModelRequest(), - parent='parent_value', - model=gca_model.Model(name='name_value'), - ) - - -def test_get_model(transport: str = 'grpc', request_type=model_service.GetModelRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = model.Model( - name='name_value', - display_name='display_name_value', - description='description_value', - metadata_schema_uri='metadata_schema_uri_value', - training_pipeline='training_pipeline_value', - artifact_uri='artifact_uri_value', - supported_deployment_resources_types=[model.Model.DeploymentResourcesType.DEDICATED_RESOURCES], - supported_input_storage_formats=['supported_input_storage_formats_value'], - supported_output_storage_formats=['supported_output_storage_formats_value'], - etag='etag_value', - ) - response = client.get_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.GetModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, model.Model) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.training_pipeline == 'training_pipeline_value' - assert response.artifact_uri == 'artifact_uri_value' - assert response.supported_deployment_resources_types == [model.Model.DeploymentResourcesType.DEDICATED_RESOURCES] - assert response.supported_input_storage_formats == ['supported_input_storage_formats_value'] - assert response.supported_output_storage_formats == ['supported_output_storage_formats_value'] - assert response.etag == 'etag_value' - - -def test_get_model_from_dict(): - test_get_model(request_type=dict) - - -def test_get_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - client.get_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.GetModelRequest() - - -@pytest.mark.asyncio -async def test_get_model_async(transport: str = 'grpc_asyncio', request_type=model_service.GetModelRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model.Model( - name='name_value', - display_name='display_name_value', - description='description_value', - metadata_schema_uri='metadata_schema_uri_value', - training_pipeline='training_pipeline_value', - artifact_uri='artifact_uri_value', - supported_deployment_resources_types=[model.Model.DeploymentResourcesType.DEDICATED_RESOURCES], - supported_input_storage_formats=['supported_input_storage_formats_value'], - supported_output_storage_formats=['supported_output_storage_formats_value'], - etag='etag_value', - )) - response = await client.get_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.GetModelRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, model.Model) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.training_pipeline == 'training_pipeline_value' - assert response.artifact_uri == 'artifact_uri_value' - assert response.supported_deployment_resources_types == [model.Model.DeploymentResourcesType.DEDICATED_RESOURCES] - assert response.supported_input_storage_formats == ['supported_input_storage_formats_value'] - assert response.supported_output_storage_formats == ['supported_output_storage_formats_value'] - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_model_async_from_dict(): - await test_get_model_async(request_type=dict) - - -def test_get_model_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.GetModelRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - call.return_value = model.Model() - client.get_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_model_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = model_service.GetModelRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) - await client.get_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_model_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model.Model() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_model_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_model( - model_service.GetModelRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_model_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model.Model() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_model_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_model( - model_service.GetModelRequest(), - name='name_value', - ) - - -def test_list_models(transport: str = 'grpc', request_type=model_service.ListModelsRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = model_service.ListModelsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_models(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_models_from_dict(): - test_list_models(request_type=dict) - - -def test_list_models_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - client.list_models() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelsRequest() - - -@pytest.mark.asyncio -async def test_list_models_async(transport: str = 'grpc_asyncio', request_type=model_service.ListModelsRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_models(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_models_async_from_dict(): - await test_list_models_async(request_type=dict) - - -def test_list_models_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.ListModelsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - call.return_value = model_service.ListModelsResponse() - client.list_models(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_models_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = model_service.ListModelsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelsResponse()) - await client.list_models(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_models_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_service.ListModelsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_models( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_models_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_models( - model_service.ListModelsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_models_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_service.ListModelsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_models( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_models_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_models( - model_service.ListModelsRequest(), - parent='parent_value', - ) - - -def test_list_models_pager(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - model_service.ListModelsResponse( - models=[ - model.Model(), - model.Model(), - model.Model(), - ], - next_page_token='abc', - ), - model_service.ListModelsResponse( - models=[], - next_page_token='def', - ), - model_service.ListModelsResponse( - models=[ - model.Model(), - ], - next_page_token='ghi', - ), - model_service.ListModelsResponse( - models=[ - model.Model(), - model.Model(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_models(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, model.Model) - for i in results) - -def test_list_models_pages(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - model_service.ListModelsResponse( - models=[ - model.Model(), - model.Model(), - model.Model(), - ], - next_page_token='abc', - ), - model_service.ListModelsResponse( - models=[], - next_page_token='def', - ), - model_service.ListModelsResponse( - models=[ - model.Model(), - ], - next_page_token='ghi', - ), - model_service.ListModelsResponse( - models=[ - model.Model(), - model.Model(), - ], - ), - RuntimeError, - ) - pages = list(client.list_models(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_models_async_pager(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_models), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - model_service.ListModelsResponse( - models=[ - model.Model(), - model.Model(), - model.Model(), - ], - next_page_token='abc', - ), - model_service.ListModelsResponse( - models=[], - next_page_token='def', - ), - model_service.ListModelsResponse( - models=[ - model.Model(), - ], - next_page_token='ghi', - ), - model_service.ListModelsResponse( - models=[ - model.Model(), - model.Model(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_models(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, model.Model) - for i in responses) - -@pytest.mark.asyncio -async def test_list_models_async_pages(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - model_service.ListModelsResponse( - models=[ - model.Model(), - model.Model(), - model.Model(), - ], - next_page_token='abc', - ), - model_service.ListModelsResponse( - models=[], - next_page_token='def', - ), - model_service.ListModelsResponse( - models=[ - model.Model(), - ], - next_page_token='ghi', - ), - model_service.ListModelsResponse( - models=[ - model.Model(), - model.Model(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_models(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_model(transport: str = 'grpc', request_type=model_service.UpdateModelRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_model.Model( - name='name_value', - display_name='display_name_value', - description='description_value', - metadata_schema_uri='metadata_schema_uri_value', - training_pipeline='training_pipeline_value', - artifact_uri='artifact_uri_value', - supported_deployment_resources_types=[gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES], - supported_input_storage_formats=['supported_input_storage_formats_value'], - supported_output_storage_formats=['supported_output_storage_formats_value'], - etag='etag_value', - ) - response = client.update_model(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.UpdateModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_model.Model) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.training_pipeline == 'training_pipeline_value' - assert response.artifact_uri == 'artifact_uri_value' - assert response.supported_deployment_resources_types == [gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES] - assert response.supported_input_storage_formats == ['supported_input_storage_formats_value'] - assert response.supported_output_storage_formats == ['supported_output_storage_formats_value'] - assert response.etag == 'etag_value' - - -def test_update_model_from_dict(): - test_update_model(request_type=dict) - - -def test_update_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model), - '__call__') as call: - client.update_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.UpdateModelRequest() - - -@pytest.mark.asyncio -async def test_update_model_async(transport: str = 'grpc_asyncio', request_type=model_service.UpdateModelRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model( - name='name_value', - display_name='display_name_value', - description='description_value', - metadata_schema_uri='metadata_schema_uri_value', - training_pipeline='training_pipeline_value', - artifact_uri='artifact_uri_value', - supported_deployment_resources_types=[gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES], - supported_input_storage_formats=['supported_input_storage_formats_value'], - supported_output_storage_formats=['supported_output_storage_formats_value'], - etag='etag_value', - )) - response = await client.update_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.UpdateModelRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_model.Model) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.training_pipeline == 'training_pipeline_value' - assert response.artifact_uri == 'artifact_uri_value' - assert response.supported_deployment_resources_types == [gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES] - assert response.supported_input_storage_formats == ['supported_input_storage_formats_value'] - assert response.supported_output_storage_formats == ['supported_output_storage_formats_value'] - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_update_model_async_from_dict(): - await test_update_model_async(request_type=dict) - - -def test_update_model_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.UpdateModelRequest() - - request.model.name = 'model.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model), - '__call__') as call: - call.return_value = gca_model.Model() - client.update_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'model.name=model.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_model_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.UpdateModelRequest() - - request.model.name = 'model.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model()) - await client.update_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'model.name=model.name/value', - ) in kw['metadata'] - - -def test_update_model_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_model.Model() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_model( - model=gca_model.Model(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].model - mock_val = gca_model.Model(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_model_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_model( - model_service.UpdateModelRequest(), - model=gca_model.Model(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_model_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_model.Model() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_model( - model=gca_model.Model(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].model - mock_val = gca_model.Model(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_model_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_model( - model_service.UpdateModelRequest(), - model=gca_model.Model(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_model(transport: str = 'grpc', request_type=model_service.DeleteModelRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.DeleteModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_model_from_dict(): - test_delete_model(request_type=dict) - - -def test_delete_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - client.delete_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.DeleteModelRequest() - - -@pytest.mark.asyncio -async def test_delete_model_async(transport: str = 'grpc_asyncio', request_type=model_service.DeleteModelRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.DeleteModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_model_async_from_dict(): - await test_delete_model_async(request_type=dict) - - -def test_delete_model_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = model_service.DeleteModelRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_model_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.DeleteModelRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_model_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_model_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_model( - model_service.DeleteModelRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_model_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_model_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_model( - model_service.DeleteModelRequest(), - name='name_value', - ) - - -def test_export_model(transport: str = 'grpc', request_type=model_service.ExportModelRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.export_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ExportModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_export_model_from_dict(): - test_export_model(request_type=dict) - - -def test_export_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - client.export_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ExportModelRequest() - - -@pytest.mark.asyncio -async def test_export_model_async(transport: str = 'grpc_asyncio', request_type=model_service.ExportModelRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.export_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ExportModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_export_model_async_from_dict(): - await test_export_model_async(request_type=dict) - - -def test_export_model_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.ExportModelRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.export_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_export_model_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.ExportModelRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.export_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_export_model_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.export_model( - name='name_value', - output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].output_config - mock_val = model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value') - assert arg == mock_val - - -def test_export_model_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.export_model( - model_service.ExportModelRequest(), - name='name_value', - output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'), - ) - - -@pytest.mark.asyncio -async def test_export_model_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.export_model( - name='name_value', - output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].output_config - mock_val = model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_export_model_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.export_model( - model_service.ExportModelRequest(), - name='name_value', - output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'), - ) - - -def test_get_model_evaluation(transport: str = 'grpc', request_type=model_service.GetModelEvaluationRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_evaluation.ModelEvaluation( - name='name_value', - metrics_schema_uri='metrics_schema_uri_value', - slice_dimensions=['slice_dimensions_value'], - ) - response = client.get_model_evaluation(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.GetModelEvaluationRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, model_evaluation.ModelEvaluation) - assert response.name == 'name_value' - assert response.metrics_schema_uri == 'metrics_schema_uri_value' - assert response.slice_dimensions == ['slice_dimensions_value'] - - -def test_get_model_evaluation_from_dict(): - test_get_model_evaluation(request_type=dict) - - -def test_get_model_evaluation_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - client.get_model_evaluation() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.GetModelEvaluationRequest() - - -@pytest.mark.asyncio -async def test_get_model_evaluation_async(transport: str = 'grpc_asyncio', request_type=model_service.GetModelEvaluationRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation( - name='name_value', - metrics_schema_uri='metrics_schema_uri_value', - slice_dimensions=['slice_dimensions_value'], - )) - response = await client.get_model_evaluation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.GetModelEvaluationRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, model_evaluation.ModelEvaluation) - assert response.name == 'name_value' - assert response.metrics_schema_uri == 'metrics_schema_uri_value' - assert response.slice_dimensions == ['slice_dimensions_value'] - - -@pytest.mark.asyncio -async def test_get_model_evaluation_async_from_dict(): - await test_get_model_evaluation_async(request_type=dict) - - -def test_get_model_evaluation_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.GetModelEvaluationRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - call.return_value = model_evaluation.ModelEvaluation() - client.get_model_evaluation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_model_evaluation_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.GetModelEvaluationRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation()) - await client.get_model_evaluation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_model_evaluation_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_evaluation.ModelEvaluation() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_model_evaluation( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_model_evaluation_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_model_evaluation( - model_service.GetModelEvaluationRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_model_evaluation_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_evaluation.ModelEvaluation() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_model_evaluation( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_model_evaluation_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_model_evaluation( - model_service.GetModelEvaluationRequest(), - name='name_value', - ) - - -def test_list_model_evaluations(transport: str = 'grpc', request_type=model_service.ListModelEvaluationsRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_service.ListModelEvaluationsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_model_evaluations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelEvaluationsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelEvaluationsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_model_evaluations_from_dict(): - test_list_model_evaluations(request_type=dict) - - -def test_list_model_evaluations_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - client.list_model_evaluations() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelEvaluationsRequest() - - -@pytest.mark.asyncio -async def test_list_model_evaluations_async(transport: str = 'grpc_asyncio', request_type=model_service.ListModelEvaluationsRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_model_evaluations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelEvaluationsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelEvaluationsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_model_evaluations_async_from_dict(): - await test_list_model_evaluations_async(request_type=dict) - - -def test_list_model_evaluations_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = model_service.ListModelEvaluationsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - call.return_value = model_service.ListModelEvaluationsResponse() - client.list_model_evaluations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_model_evaluations_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.ListModelEvaluationsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationsResponse()) - await client.list_model_evaluations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_model_evaluations_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_service.ListModelEvaluationsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_model_evaluations( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_model_evaluations_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_model_evaluations( - model_service.ListModelEvaluationsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_model_evaluations_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_service.ListModelEvaluationsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_model_evaluations( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_model_evaluations_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_model_evaluations( - model_service.ListModelEvaluationsRequest(), - parent='parent_value', - ) - - -def test_list_model_evaluations_pager(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[], - next_page_token='def', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_model_evaluations(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, model_evaluation.ModelEvaluation) - for i in results) - -def test_list_model_evaluations_pages(): - client = 
ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[], - next_page_token='def', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - ), - RuntimeError, - ) - pages = list(client.list_model_evaluations(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_model_evaluations_async_pager(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[], - next_page_token='def', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_model_evaluations(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, model_evaluation.ModelEvaluation) - for i in responses) - -@pytest.mark.asyncio -async def test_list_model_evaluations_async_pages(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[], - next_page_token='def', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_model_evaluations(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_get_model_evaluation_slice(transport: str = 'grpc', request_type=model_service.GetModelEvaluationSliceRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation_slice), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_evaluation_slice.ModelEvaluationSlice( - name='name_value', - metrics_schema_uri='metrics_schema_uri_value', - ) - response = client.get_model_evaluation_slice(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.GetModelEvaluationSliceRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, model_evaluation_slice.ModelEvaluationSlice) - assert response.name == 'name_value' - assert response.metrics_schema_uri == 'metrics_schema_uri_value' - - -def test_get_model_evaluation_slice_from_dict(): - test_get_model_evaluation_slice(request_type=dict) - - -def test_get_model_evaluation_slice_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation_slice), - '__call__') as call: - client.get_model_evaluation_slice() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.GetModelEvaluationSliceRequest() - - -@pytest.mark.asyncio -async def test_get_model_evaluation_slice_async(transport: str = 'grpc_asyncio', request_type=model_service.GetModelEvaluationSliceRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation_slice), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation_slice.ModelEvaluationSlice( - name='name_value', - metrics_schema_uri='metrics_schema_uri_value', - )) - response = await client.get_model_evaluation_slice(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.GetModelEvaluationSliceRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, model_evaluation_slice.ModelEvaluationSlice) - assert response.name == 'name_value' - assert response.metrics_schema_uri == 'metrics_schema_uri_value' - - -@pytest.mark.asyncio -async def test_get_model_evaluation_slice_async_from_dict(): - await test_get_model_evaluation_slice_async(request_type=dict) - - -def test_get_model_evaluation_slice_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.GetModelEvaluationSliceRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation_slice), - '__call__') as call: - call.return_value = model_evaluation_slice.ModelEvaluationSlice() - client.get_model_evaluation_slice(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_model_evaluation_slice_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.GetModelEvaluationSliceRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_model_evaluation_slice), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation_slice.ModelEvaluationSlice()) - await client.get_model_evaluation_slice(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_model_evaluation_slice_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation_slice), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_evaluation_slice.ModelEvaluationSlice() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_model_evaluation_slice( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_model_evaluation_slice_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_model_evaluation_slice( - model_service.GetModelEvaluationSliceRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_model_evaluation_slice_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation_slice), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_evaluation_slice.ModelEvaluationSlice() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation_slice.ModelEvaluationSlice()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_model_evaluation_slice( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_model_evaluation_slice_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_model_evaluation_slice( - model_service.GetModelEvaluationSliceRequest(), - name='name_value', - ) - - -def test_list_model_evaluation_slices(transport: str = 'grpc', request_type=model_service.ListModelEvaluationSlicesRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_service.ListModelEvaluationSlicesResponse( - next_page_token='next_page_token_value', - ) - response = client.list_model_evaluation_slices(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelEvaluationSlicesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelEvaluationSlicesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_model_evaluation_slices_from_dict(): - test_list_model_evaluation_slices(request_type=dict) - - -def test_list_model_evaluation_slices_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - client.list_model_evaluation_slices() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelEvaluationSlicesRequest() - - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_async(transport: str = 'grpc_asyncio', request_type=model_service.ListModelEvaluationSlicesRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationSlicesResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_model_evaluation_slices(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelEvaluationSlicesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelEvaluationSlicesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_async_from_dict(): - await test_list_model_evaluation_slices_async(request_type=dict) - - -def test_list_model_evaluation_slices_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. 
Set these to a non-empty value. - request = model_service.ListModelEvaluationSlicesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - call.return_value = model_service.ListModelEvaluationSlicesResponse() - client.list_model_evaluation_slices(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.ListModelEvaluationSlicesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationSlicesResponse()) - await client.list_model_evaluation_slices(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_model_evaluation_slices_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_service.ListModelEvaluationSlicesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_model_evaluation_slices( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_model_evaluation_slices_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_model_evaluation_slices( - model_service.ListModelEvaluationSlicesRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = model_service.ListModelEvaluationSlicesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationSlicesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_model_evaluation_slices( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_model_evaluation_slices( - model_service.ListModelEvaluationSlicesRequest(), - parent='parent_value', - ) - - -def test_list_model_evaluation_slices_pager(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[], - next_page_token='def', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_model_evaluation_slices(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, model_evaluation_slice.ModelEvaluationSlice) - for i in results) - -def test_list_model_evaluation_slices_pages(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[], - next_page_token='def', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - ), - RuntimeError, - ) - pages = list(client.list_model_evaluation_slices(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_async_pager(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[], - next_page_token='def', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_model_evaluation_slices(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, model_evaluation_slice.ModelEvaluationSlice) - for i in responses) - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_async_pages(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[], - next_page_token='def', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_model_evaluation_slices(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.ModelServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.ModelServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = ModelServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. 
- transport = transports.ModelServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = ModelServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.ModelServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = ModelServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.ModelServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.ModelServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.ModelServiceGrpcTransport, - transports.ModelServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. 
- client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.ModelServiceGrpcTransport, - ) - -def test_model_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.ModelServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_model_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1.services.model_service.transports.ModelServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.ModelServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'upload_model', - 'get_model', - 'list_models', - 'update_model', - 'delete_model', - 'export_model', - 'get_model_evaluation', - 'list_model_evaluations', - 'get_model_evaluation_slice', - 'list_model_evaluation_slices', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -def test_model_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.model_service.transports.ModelServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - 
transport = transports.ModelServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_model_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.model_service.transports.ModelServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.ModelServiceTransport() - adc.assert_called_once() - - -def test_model_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - ModelServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.ModelServiceGrpcTransport, - transports.ModelServiceGrpcAsyncIOTransport, - ], -) -def test_model_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.ModelServiceGrpcTransport, grpc_helpers), - (transports.ModelServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_model_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport]) -def test_model_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
- with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_model_service_host_no_port(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_model_service_host_with_port(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_model_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
# NOTE(review): this span was raw unified-diff residue (every line prefixed
# with "-", statements wrapped mid-token).  Reconstructed into valid Python;
# logic is unchanged except `== None` -> `is None` (idiom).  These are the
# trailing tests of the deleted generated file test_model_service.py.


def test_model_service_grpc_transport_channel():
    # NOTE(review): this def line fell outside the visible hunk; reconstructed
    # from the grpc_asyncio twin below — confirm against the full patch.
    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())

    # Check that channel is used if provided.
    transport = transports.ModelServiceGrpcTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None


def test_model_service_grpc_asyncio_transport_channel():
    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())

    # Check that channel is used if provided.
    transport = transports.ModelServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None


# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport])
def test_model_service_transport_channel_mtls_with_client_cert_source(
    transport_class
):
    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred

            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel

            cred = ga_credentials.AnonymousCredentials()
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, 'default') as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()

            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred


# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport])
def test_model_service_transport_channel_mtls_with_adc(
    transport_class
):
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()

            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )

            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel


def test_model_service_grpc_lro_client():
    client = ModelServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    transport = client.transport

    # Ensure that we have a api-core operations client.
    assert isinstance(
        transport.operations_client,
        operations_v1.OperationsClient,
    )

    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client


def test_model_service_grpc_lro_async_client():
    client = ModelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc_asyncio',
    )
    transport = client.transport

    # Ensure that we have a api-core operations client.
    assert isinstance(
        transport.operations_client,
        operations_v1.OperationsAsyncClient,
    )

    # Ensure that subsequent calls to the property send the exact same object.
    assert transport.operations_client is transport.operations_client


def test_endpoint_path():
    project = "squid"
    location = "clam"
    endpoint = "whelk"
    expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, )
    actual = ModelServiceClient.endpoint_path(project, location, endpoint)
    assert expected == actual


def test_parse_endpoint_path():
    expected = {
        "project": "octopus",
        "location": "oyster",
        "endpoint": "nudibranch",
    }
    path = ModelServiceClient.endpoint_path(**expected)

    # Check that the path construction is reversible.
    actual = ModelServiceClient.parse_endpoint_path(path)
    assert expected == actual


def test_model_path():
    project = "cuttlefish"
    location = "mussel"
    model = "winkle"
    expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, )
    actual = ModelServiceClient.model_path(project, location, model)
    assert expected == actual


def test_parse_model_path():
    expected = {
        "project": "nautilus",
        "location": "scallop",
        "model": "abalone",
    }
    path = ModelServiceClient.model_path(**expected)

    # Check that the path construction is reversible.
    actual = ModelServiceClient.parse_model_path(path)
    assert expected == actual


def test_model_evaluation_path():
    project = "squid"
    location = "clam"
    model = "whelk"
    evaluation = "octopus"
    expected = "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}".format(project=project, location=location, model=model, evaluation=evaluation, )
    actual = ModelServiceClient.model_evaluation_path(project, location, model, evaluation)
    assert expected == actual


def test_parse_model_evaluation_path():
    expected = {
        "project": "oyster",
        "location": "nudibranch",
        "model": "cuttlefish",
        "evaluation": "mussel",
    }
    path = ModelServiceClient.model_evaluation_path(**expected)

    # Check that the path construction is reversible.
    actual = ModelServiceClient.parse_model_evaluation_path(path)
    assert expected == actual


def test_model_evaluation_slice_path():
    project = "winkle"
    location = "nautilus"
    model = "scallop"
    evaluation = "abalone"
    slice = "squid"
    expected = "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}".format(project=project, location=location, model=model, evaluation=evaluation, slice=slice, )
    actual = ModelServiceClient.model_evaluation_slice_path(project, location, model, evaluation, slice)
    assert expected == actual


def test_parse_model_evaluation_slice_path():
    expected = {
        "project": "clam",
        "location": "whelk",
        "model": "octopus",
        "evaluation": "oyster",
        "slice": "nudibranch",
    }
    path = ModelServiceClient.model_evaluation_slice_path(**expected)

    # Check that the path construction is reversible.
    actual = ModelServiceClient.parse_model_evaluation_slice_path(path)
    assert expected == actual


def test_training_pipeline_path():
    project = "cuttlefish"
    location = "mussel"
    training_pipeline = "winkle"
    expected = "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, )
    actual = ModelServiceClient.training_pipeline_path(project, location, training_pipeline)
    assert expected == actual


def test_parse_training_pipeline_path():
    expected = {
        "project": "nautilus",
        "location": "scallop",
        "training_pipeline": "abalone",
    }
    path = ModelServiceClient.training_pipeline_path(**expected)

    # Check that the path construction is reversible.
    actual = ModelServiceClient.parse_training_pipeline_path(path)
    assert expected == actual


def test_common_billing_account_path():
    billing_account = "squid"
    expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
    actual = ModelServiceClient.common_billing_account_path(billing_account)
    assert expected == actual


def test_parse_common_billing_account_path():
    expected = {
        "billing_account": "clam",
    }
    path = ModelServiceClient.common_billing_account_path(**expected)

    # Check that the path construction is reversible.
    actual = ModelServiceClient.parse_common_billing_account_path(path)
    assert expected == actual


def test_common_folder_path():
    folder = "whelk"
    expected = "folders/{folder}".format(folder=folder, )
    actual = ModelServiceClient.common_folder_path(folder)
    assert expected == actual


def test_parse_common_folder_path():
    expected = {
        "folder": "octopus",
    }
    path = ModelServiceClient.common_folder_path(**expected)

    # Check that the path construction is reversible.
    actual = ModelServiceClient.parse_common_folder_path(path)
    assert expected == actual


def test_common_organization_path():
    organization = "oyster"
    expected = "organizations/{organization}".format(organization=organization, )
    actual = ModelServiceClient.common_organization_path(organization)
    assert expected == actual


def test_parse_common_organization_path():
    expected = {
        "organization": "nudibranch",
    }
    path = ModelServiceClient.common_organization_path(**expected)

    # Check that the path construction is reversible.
    actual = ModelServiceClient.parse_common_organization_path(path)
    assert expected == actual


def test_common_project_path():
    project = "cuttlefish"
    expected = "projects/{project}".format(project=project, )
    actual = ModelServiceClient.common_project_path(project)
    assert expected == actual


def test_parse_common_project_path():
    expected = {
        "project": "mussel",
    }
    path = ModelServiceClient.common_project_path(**expected)

    # Check that the path construction is reversible.
    actual = ModelServiceClient.parse_common_project_path(path)
    assert expected == actual


def test_common_location_path():
    project = "winkle"
    location = "nautilus"
    expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
    actual = ModelServiceClient.common_location_path(project, location)
    assert expected == actual


def test_parse_common_location_path():
    expected = {
        "project": "scallop",
        "location": "abalone",
    }
    path = ModelServiceClient.common_location_path(**expected)

    # Check that the path construction is reversible.
    actual = ModelServiceClient.parse_common_location_path(path)
    assert expected == actual


def test_client_withDEFAULT_CLIENT_INFO():
    client_info = gapic_v1.client_info.ClientInfo()

    with mock.patch.object(transports.ModelServiceTransport, '_prep_wrapped_messages') as prep:
        client = ModelServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)

    with mock.patch.object(transports.ModelServiceTransport, '_prep_wrapped_messages') as prep:
        transport_class = ModelServiceClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)


@pytest.mark.asyncio
async def test_transport_close_async():
    client = ModelServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc_asyncio",
    )
    with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close:
        async with client:
            close.assert_not_called()
        close.assert_called_once()


def test_transport_close():
    transports = {
        "grpc": "_grpc_channel",
    }

    for transport, close_name in transports.items():
        client = ModelServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport
        )
        with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()


def test_client_ctx():
    transports = [
        'grpc',
    ]
    for transport in transports:
        client = ModelServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()

# --- end of deleted file test_model_service.py ---
# NOTE(review): the original patch continued here with diff metadata for a
# second deleted file (owl-bot-staging/.../test_pipeline_service.py) and that
# file's Apache-2.0 license header; its code begins in the next block.
# NOTE(review): reconstructed from unified-diff residue of the deleted
# generated file test_pipeline_service.py (leading "-" markers and line wraps
# splitting identifiers, e.g. "def \n test_pipeline_service_client_...", were
# repaired).  Logic is unchanged.
import os
import mock

import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule


from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async  # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.aiplatform_v1.services.pipeline_service import PipelineServiceAsyncClient
from google.cloud.aiplatform_v1.services.pipeline_service import PipelineServiceClient
from google.cloud.aiplatform_v1.services.pipeline_service import pagers
from google.cloud.aiplatform_v1.services.pipeline_service import transports
from google.cloud.aiplatform_v1.types import artifact
from google.cloud.aiplatform_v1.types import context
from google.cloud.aiplatform_v1.types import deployed_model_ref
from google.cloud.aiplatform_v1.types import encryption_spec
from google.cloud.aiplatform_v1.types import env_var
from google.cloud.aiplatform_v1.types import execution
from google.cloud.aiplatform_v1.types import explanation
from google.cloud.aiplatform_v1.types import explanation_metadata
from google.cloud.aiplatform_v1.types import io
from google.cloud.aiplatform_v1.types import model
from google.cloud.aiplatform_v1.types import operation as gca_operation
from google.cloud.aiplatform_v1.types import pipeline_job
from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job
from google.cloud.aiplatform_v1.types import pipeline_service
from google.cloud.aiplatform_v1.types import pipeline_state
from google.cloud.aiplatform_v1.types import training_pipeline
from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
from google.cloud.aiplatform_v1.types import value
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import any_pb2  # type: ignore
from google.protobuf import field_mask_pb2  # type: ignore
from google.protobuf import struct_pb2  # type: ignore
from google.protobuf import timestamp_pb2  # type: ignore
from google.rpc import status_pb2  # type: ignore
import google.auth


def client_cert_source_callback():
    return b"cert bytes", b"key bytes"


# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT


def test__get_default_mtls_endpoint():
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"

    assert PipelineServiceClient._get_default_mtls_endpoint(None) is None
    assert PipelineServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
    assert PipelineServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
    assert PipelineServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
    assert PipelineServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
    assert PipelineServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi


@pytest.mark.parametrize("client_class", [
    PipelineServiceClient,
    PipelineServiceAsyncClient,
])
def test_pipeline_service_client_from_service_account_info(client_class):
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == 'aiplatform.googleapis.com:443'


@pytest.mark.parametrize("transport_class,transport_name", [
    (transports.PipelineServiceGrpcTransport, "grpc"),
    (transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_pipeline_service_client_service_account_always_use_jwt(transport_class, transport_name):
    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)

    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()


@pytest.mark.parametrize("client_class", [
    PipelineServiceClient,
    PipelineServiceAsyncClient,
])
def test_pipeline_service_client_from_service_account_file(client_class):
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == 'aiplatform.googleapis.com:443'


def test_pipeline_service_client_get_transport_class():
    transport = PipelineServiceClient.get_transport_class()
    available_transports = [
        transports.PipelineServiceGrpcTransport,
    ]
    assert transport in available_transports

    transport = PipelineServiceClient.get_transport_class("grpc")
    assert transport == transports.PipelineServiceGrpcTransport


@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc"),
    (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
@mock.patch.object(PipelineServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceClient))
@mock.patch.object(PipelineServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceAsyncClient))
def test_pipeline_service_client_client_options(client_class, transport_class, transport_name):
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(PipelineServiceClient, 'get_transport_class') as gtc:
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials()
        )
        client = client_class(transport=transport)
        gtc.assert_not_called()

    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(PipelineServiceClient, 'get_transport_class') as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()

    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class()

    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
        with pytest.raises(ValueError):
            client = client_class()

    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )


@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
    (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc", "true"),
    (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"),
    (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc", "false"),
    (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"),
])
@mock.patch.object(PipelineServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceClient))
@mock.patch.object(PipelineServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceAsyncClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_pipeline_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.

    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class(transport=transport_name, client_options=options)

            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT

            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch.object(transport_class, '__init__') as patched:
            with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
                with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback

                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )

    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch.object(transport_class, '__init__') as patched:
            with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )


@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc"),
    (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_pipeline_service_client_client_options_scopes(client_class, transport_class, transport_name):
    # Check the case scopes are provided.
    options = client_options.ClientOptions(
        scopes=["1", "2"],
    )
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )


@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc"),
    (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_pipeline_service_client_client_options_credentials_file(client_class, transport_class, transport_name):
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(
        credentials_file="credentials.json"
    )
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )


def test_pipeline_service_client_client_options_from_dict():
    with mock.patch('google.cloud.aiplatform_v1.services.pipeline_service.transports.PipelineServiceGrpcTransport.__init__') as grpc_transport:
        grpc_transport.return_value = None
        client = PipelineServiceClient(
            client_options={'api_endpoint': 'squid.clam.whelk'}
        )
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )


def test_create_training_pipeline(transport: str = 'grpc', request_type=pipeline_service.CreateTrainingPipelineRequest):
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_training_pipeline),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = gca_training_pipeline.TrainingPipeline(
            name='name_value',
            display_name='display_name_value',
            training_task_definition='training_task_definition_value',
            state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
        )
        response = client.create_training_pipeline(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.CreateTrainingPipelineRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, gca_training_pipeline.TrainingPipeline)
    assert response.name == 'name_value'
    assert response.display_name == 'display_name_value'
    assert response.training_task_definition == 'training_task_definition_value'
    assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED


def test_create_training_pipeline_from_dict():
    test_create_training_pipeline(request_type=dict)


def test_create_training_pipeline_empty_call():
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_training_pipeline),
            '__call__') as call:
        client.create_training_pipeline()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.CreateTrainingPipelineRequest()


@pytest.mark.asyncio
async def test_create_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CreateTrainingPipelineRequest):
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_training_pipeline),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_training_pipeline.TrainingPipeline(
            name='name_value',
            display_name='display_name_value',
            training_task_definition='training_task_definition_value',
            state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED,
        ))
        response = await client.create_training_pipeline(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == pipeline_service.CreateTrainingPipelineRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, gca_training_pipeline.TrainingPipeline)
    assert response.name == 'name_value'
    assert response.display_name == 'display_name_value'
    assert response.training_task_definition == 'training_task_definition_value'
    assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED


@pytest.mark.asyncio
async def test_create_training_pipeline_async_from_dict():
    await test_create_training_pipeline_async(request_type=dict)


def test_create_training_pipeline_field_headers():
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = pipeline_service.CreateTrainingPipelineRequest()

    request.parent = 'parent/value'

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_training_pipeline),
            '__call__') as call:
        call.return_value = gca_training_pipeline.TrainingPipeline()
        client.create_training_pipeline(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'parent=parent/value',
    ) in kw['metadata']


@pytest.mark.asyncio
async def test_create_training_pipeline_field_headers_async():
    client = PipelineServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = pipeline_service.CreateTrainingPipelineRequest()

    request.parent = 'parent/value'

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_training_pipeline),
            '__call__') as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_training_pipeline.TrainingPipeline())
        await client.create_training_pipeline(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'parent=parent/value',
    ) in kw['metadata']


def test_create_training_pipeline_flattened():
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_training_pipeline),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = gca_training_pipeline.TrainingPipeline()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_training_pipeline(
            parent='parent_value',
            training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = 'parent_value'
        assert arg == mock_val
        arg = args[0].training_pipeline
        mock_val = gca_training_pipeline.TrainingPipeline(name='name_value')
        assert arg == mock_val


def test_create_training_pipeline_flattened_error():
    client = PipelineServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    # NOTE(review): the body of this test is truncated in the visible hunk of
    # the patch; the remainder must be restored from the full patch.
- with pytest.raises(ValueError): - client.create_training_pipeline( - pipeline_service.CreateTrainingPipelineRequest(), - parent='parent_value', - training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_training_pipeline_flattened_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_training_pipeline.TrainingPipeline() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_training_pipeline.TrainingPipeline()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_training_pipeline( - parent='parent_value', - training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].training_pipeline - mock_val = gca_training_pipeline.TrainingPipeline(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_training_pipeline_flattened_error_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_training_pipeline( - pipeline_service.CreateTrainingPipelineRequest(), - parent='parent_value', - training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'), - ) - - -def test_get_training_pipeline(transport: str = 'grpc', request_type=pipeline_service.GetTrainingPipelineRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = training_pipeline.TrainingPipeline( - name='name_value', - display_name='display_name_value', - training_task_definition='training_task_definition_value', - state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, - ) - response = client.get_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.GetTrainingPipelineRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, training_pipeline.TrainingPipeline) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.training_task_definition == 'training_task_definition_value' - assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED - - -def test_get_training_pipeline_from_dict(): - test_get_training_pipeline(request_type=dict) - - -def test_get_training_pipeline_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_training_pipeline), - '__call__') as call: - client.get_training_pipeline() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.GetTrainingPipelineRequest() - - -@pytest.mark.asyncio -async def test_get_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.GetTrainingPipelineRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(training_pipeline.TrainingPipeline( - name='name_value', - display_name='display_name_value', - training_task_definition='training_task_definition_value', - state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, - )) - response = await client.get_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.GetTrainingPipelineRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, training_pipeline.TrainingPipeline) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.training_task_definition == 'training_task_definition_value' - assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED - - -@pytest.mark.asyncio -async def test_get_training_pipeline_async_from_dict(): - await test_get_training_pipeline_async(request_type=dict) - - -def test_get_training_pipeline_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.GetTrainingPipelineRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_training_pipeline), - '__call__') as call: - call.return_value = training_pipeline.TrainingPipeline() - client.get_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_training_pipeline_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.GetTrainingPipelineRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_training_pipeline), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(training_pipeline.TrainingPipeline()) - await client.get_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_training_pipeline_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = training_pipeline.TrainingPipeline() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_training_pipeline( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_training_pipeline_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_training_pipeline( - pipeline_service.GetTrainingPipelineRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_training_pipeline_flattened_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = training_pipeline.TrainingPipeline() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(training_pipeline.TrainingPipeline()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_training_pipeline( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_training_pipeline_flattened_error_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_training_pipeline( - pipeline_service.GetTrainingPipelineRequest(), - name='name_value', - ) - - -def test_list_training_pipelines(transport: str = 'grpc', request_type=pipeline_service.ListTrainingPipelinesRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = pipeline_service.ListTrainingPipelinesResponse( - next_page_token='next_page_token_value', - ) - response = client.list_training_pipelines(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.ListTrainingPipelinesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTrainingPipelinesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_training_pipelines_from_dict(): - test_list_training_pipelines(request_type=dict) - - -def test_list_training_pipelines_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__') as call: - client.list_training_pipelines() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.ListTrainingPipelinesRequest() - - -@pytest.mark.asyncio -async def test_list_training_pipelines_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.ListTrainingPipelinesRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListTrainingPipelinesResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_training_pipelines(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.ListTrainingPipelinesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTrainingPipelinesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_training_pipelines_async_from_dict(): - await test_list_training_pipelines_async(request_type=dict) - - -def test_list_training_pipelines_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.ListTrainingPipelinesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__') as call: - call.return_value = pipeline_service.ListTrainingPipelinesResponse() - client.list_training_pipelines(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_training_pipelines_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.ListTrainingPipelinesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListTrainingPipelinesResponse()) - await client.list_training_pipelines(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_training_pipelines_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = pipeline_service.ListTrainingPipelinesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_training_pipelines( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_training_pipelines_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_training_pipelines( - pipeline_service.ListTrainingPipelinesRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_training_pipelines_flattened_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = pipeline_service.ListTrainingPipelinesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListTrainingPipelinesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_training_pipelines( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_training_pipelines_flattened_error_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.list_training_pipelines( - pipeline_service.ListTrainingPipelinesRequest(), - parent='parent_value', - ) - - -def test_list_training_pipelines_pager(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - ], - next_page_token='abc', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[], - next_page_token='def', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - ], - next_page_token='ghi', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_training_pipelines(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, training_pipeline.TrainingPipeline) - for i in results) - -def test_list_training_pipelines_pages(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - ], - next_page_token='abc', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[], - next_page_token='def', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - ], - next_page_token='ghi', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - ], - ), - RuntimeError, - ) - pages = list(client.list_training_pipelines(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_training_pipelines_async_pager(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - ], - next_page_token='abc', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[], - next_page_token='def', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - ], - next_page_token='ghi', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_training_pipelines(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, training_pipeline.TrainingPipeline) - for i in responses) - -@pytest.mark.asyncio -async def test_list_training_pipelines_async_pages(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - ], - next_page_token='abc', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[], - next_page_token='def', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - ], - next_page_token='ghi', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_training_pipelines(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_training_pipeline(transport: str = 'grpc', request_type=pipeline_service.DeleteTrainingPipelineRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.DeleteTrainingPipelineRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_delete_training_pipeline_from_dict(): - test_delete_training_pipeline(request_type=dict) - - -def test_delete_training_pipeline_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_training_pipeline), - '__call__') as call: - client.delete_training_pipeline() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.DeleteTrainingPipelineRequest() - - -@pytest.mark.asyncio -async def test_delete_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.DeleteTrainingPipelineRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.DeleteTrainingPipelineRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_training_pipeline_async_from_dict(): - await test_delete_training_pipeline_async(request_type=dict) - - -def test_delete_training_pipeline_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.DeleteTrainingPipelineRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_training_pipeline), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_training_pipeline_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.DeleteTrainingPipelineRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_training_pipeline), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_training_pipeline_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_training_pipeline( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_training_pipeline_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_training_pipeline( - pipeline_service.DeleteTrainingPipelineRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_training_pipeline_flattened_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_training_pipeline( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_training_pipeline_flattened_error_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_training_pipeline( - pipeline_service.DeleteTrainingPipelineRequest(), - name='name_value', - ) - - -def test_cancel_training_pipeline(transport: str = 'grpc', request_type=pipeline_service.CancelTrainingPipelineRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.cancel_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CancelTrainingPipelineRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_cancel_training_pipeline_from_dict(): - test_cancel_training_pipeline(request_type=dict) - - -def test_cancel_training_pipeline_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_training_pipeline), - '__call__') as call: - client.cancel_training_pipeline() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CancelTrainingPipelineRequest() - - -@pytest.mark.asyncio -async def test_cancel_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CancelTrainingPipelineRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.cancel_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CancelTrainingPipelineRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_cancel_training_pipeline_async_from_dict(): - await test_cancel_training_pipeline_async(request_type=dict) - - -def test_cancel_training_pipeline_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.CancelTrainingPipelineRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_training_pipeline), - '__call__') as call: - call.return_value = None - client.cancel_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_cancel_training_pipeline_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.CancelTrainingPipelineRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.cancel_training_pipeline), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.cancel_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_cancel_training_pipeline_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.cancel_training_pipeline( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_cancel_training_pipeline_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.cancel_training_pipeline( - pipeline_service.CancelTrainingPipelineRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_cancel_training_pipeline_flattened_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.cancel_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.cancel_training_pipeline( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_cancel_training_pipeline_flattened_error_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.cancel_training_pipeline( - pipeline_service.CancelTrainingPipelineRequest(), - name='name_value', - ) - - -def test_create_pipeline_job(transport: str = 'grpc', request_type=pipeline_service.CreatePipelineJobRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = gca_pipeline_job.PipelineJob( - name='name_value', - display_name='display_name_value', - state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, - service_account='service_account_value', - network='network_value', - ) - response = client.create_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CreatePipelineJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_pipeline_job.PipelineJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED - assert response.service_account == 'service_account_value' - assert response.network == 'network_value' - - -def test_create_pipeline_job_from_dict(): - test_create_pipeline_job(request_type=dict) - - -def test_create_pipeline_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_pipeline_job), - '__call__') as call: - client.create_pipeline_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CreatePipelineJobRequest() - - -@pytest.mark.asyncio -async def test_create_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CreatePipelineJobRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_pipeline_job.PipelineJob( - name='name_value', - display_name='display_name_value', - state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, - service_account='service_account_value', - network='network_value', - )) - response = await client.create_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CreatePipelineJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_pipeline_job.PipelineJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED - assert response.service_account == 'service_account_value' - assert response.network == 'network_value' - - -@pytest.mark.asyncio -async def test_create_pipeline_job_async_from_dict(): - await test_create_pipeline_job_async(request_type=dict) - - -def test_create_pipeline_job_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.CreatePipelineJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_pipeline_job), - '__call__') as call: - call.return_value = gca_pipeline_job.PipelineJob() - client.create_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_pipeline_job_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.CreatePipelineJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_pipeline_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_pipeline_job.PipelineJob()) - await client.create_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_pipeline_job_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_pipeline_job.PipelineJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_pipeline_job( - parent='parent_value', - pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'), - pipeline_job_id='pipeline_job_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].pipeline_job - mock_val = gca_pipeline_job.PipelineJob(name='name_value') - assert arg == mock_val - arg = args[0].pipeline_job_id - mock_val = 'pipeline_job_id_value' - assert arg == mock_val - - -def test_create_pipeline_job_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_pipeline_job( - pipeline_service.CreatePipelineJobRequest(), - parent='parent_value', - pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'), - pipeline_job_id='pipeline_job_id_value', - ) - - -@pytest.mark.asyncio -async def test_create_pipeline_job_flattened_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_pipeline_job.PipelineJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_pipeline_job.PipelineJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_pipeline_job( - parent='parent_value', - pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'), - pipeline_job_id='pipeline_job_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].pipeline_job - mock_val = gca_pipeline_job.PipelineJob(name='name_value') - assert arg == mock_val - arg = args[0].pipeline_job_id - mock_val = 'pipeline_job_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_pipeline_job_flattened_error_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_pipeline_job( - pipeline_service.CreatePipelineJobRequest(), - parent='parent_value', - pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'), - pipeline_job_id='pipeline_job_id_value', - ) - - -def test_get_pipeline_job(transport: str = 'grpc', request_type=pipeline_service.GetPipelineJobRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = pipeline_job.PipelineJob( - name='name_value', - display_name='display_name_value', - state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, - service_account='service_account_value', - network='network_value', - ) - response = client.get_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.GetPipelineJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pipeline_job.PipelineJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED - assert response.service_account == 'service_account_value' - assert response.network == 'network_value' - - -def test_get_pipeline_job_from_dict(): - test_get_pipeline_job(request_type=dict) - - -def test_get_pipeline_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_pipeline_job), - '__call__') as call: - client.get_pipeline_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.GetPipelineJobRequest() - - -@pytest.mark.asyncio -async def test_get_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.GetPipelineJobRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(pipeline_job.PipelineJob( - name='name_value', - display_name='display_name_value', - state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, - service_account='service_account_value', - network='network_value', - )) - response = await client.get_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.GetPipelineJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pipeline_job.PipelineJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED - assert response.service_account == 'service_account_value' - assert response.network == 'network_value' - - -@pytest.mark.asyncio -async def test_get_pipeline_job_async_from_dict(): - await test_get_pipeline_job_async(request_type=dict) - - -def test_get_pipeline_job_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.GetPipelineJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_pipeline_job), - '__call__') as call: - call.return_value = pipeline_job.PipelineJob() - client.get_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_pipeline_job_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.GetPipelineJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_pipeline_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_job.PipelineJob()) - await client.get_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_pipeline_job_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = pipeline_job.PipelineJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_pipeline_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_pipeline_job_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_pipeline_job( - pipeline_service.GetPipelineJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_pipeline_job_flattened_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = pipeline_job.PipelineJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_job.PipelineJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_pipeline_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_pipeline_job_flattened_error_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_pipeline_job( - pipeline_service.GetPipelineJobRequest(), - name='name_value', - ) - - -def test_list_pipeline_jobs(transport: str = 'grpc', request_type=pipeline_service.ListPipelineJobsRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = pipeline_service.ListPipelineJobsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_pipeline_jobs(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.ListPipelineJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListPipelineJobsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_pipeline_jobs_from_dict(): - test_list_pipeline_jobs(request_type=dict) - - -def test_list_pipeline_jobs_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - client.list_pipeline_jobs() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.ListPipelineJobsRequest() - - -@pytest.mark.asyncio -async def test_list_pipeline_jobs_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.ListPipelineJobsRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListPipelineJobsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_pipeline_jobs(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.ListPipelineJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListPipelineJobsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_pipeline_jobs_async_from_dict(): - await test_list_pipeline_jobs_async(request_type=dict) - - -def test_list_pipeline_jobs_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.ListPipelineJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - call.return_value = pipeline_service.ListPipelineJobsResponse() - client.list_pipeline_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_pipeline_jobs_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.ListPipelineJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListPipelineJobsResponse()) - await client.list_pipeline_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_pipeline_jobs_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = pipeline_service.ListPipelineJobsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_pipeline_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_pipeline_jobs_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_pipeline_jobs( - pipeline_service.ListPipelineJobsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_pipeline_jobs_flattened_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = pipeline_service.ListPipelineJobsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListPipelineJobsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_pipeline_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_pipeline_jobs_flattened_error_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_pipeline_jobs( - pipeline_service.ListPipelineJobsRequest(), - parent='parent_value', - ) - - -def test_list_pipeline_jobs_pager(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - ], - next_page_token='abc', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[], - next_page_token='def', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - ], - next_page_token='ghi', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_pipeline_jobs(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, pipeline_job.PipelineJob) - for i in results) - -def test_list_pipeline_jobs_pages(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - ], - next_page_token='abc', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[], - next_page_token='def', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - ], - next_page_token='ghi', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - ], - ), - RuntimeError, - ) - pages = list(client.list_pipeline_jobs(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_pipeline_jobs_async_pager(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - ], - next_page_token='abc', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[], - next_page_token='def', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - ], - next_page_token='ghi', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_pipeline_jobs(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, pipeline_job.PipelineJob) - for i in responses) - -@pytest.mark.asyncio -async def test_list_pipeline_jobs_async_pages(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - ], - next_page_token='abc', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[], - next_page_token='def', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - ], - next_page_token='ghi', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_pipeline_jobs(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_pipeline_job(transport: str = 'grpc', request_type=pipeline_service.DeletePipelineJobRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.DeletePipelineJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_delete_pipeline_job_from_dict(): - test_delete_pipeline_job(request_type=dict) - - -def test_delete_pipeline_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_pipeline_job), - '__call__') as call: - client.delete_pipeline_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.DeletePipelineJobRequest() - - -@pytest.mark.asyncio -async def test_delete_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.DeletePipelineJobRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.DeletePipelineJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_pipeline_job_async_from_dict(): - await test_delete_pipeline_job_async(request_type=dict) - - -def test_delete_pipeline_job_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.DeletePipelineJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_pipeline_job), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_pipeline_job_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.DeletePipelineJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_pipeline_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_pipeline_job_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_pipeline_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_pipeline_job_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_pipeline_job( - pipeline_service.DeletePipelineJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_pipeline_job_flattened_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_pipeline_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_pipeline_job_flattened_error_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_pipeline_job( - pipeline_service.DeletePipelineJobRequest(), - name='name_value', - ) - - -def test_cancel_pipeline_job(transport: str = 'grpc', request_type=pipeline_service.CancelPipelineJobRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.cancel_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CancelPipelineJobRequest() - - # Establish that the response is the type that we expect. 
- assert response is None - - -def test_cancel_pipeline_job_from_dict(): - test_cancel_pipeline_job(request_type=dict) - - -def test_cancel_pipeline_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_pipeline_job), - '__call__') as call: - client.cancel_pipeline_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CancelPipelineJobRequest() - - -@pytest.mark.asyncio -async def test_cancel_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CancelPipelineJobRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.cancel_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CancelPipelineJobRequest() - - # Establish that the response is the type that we expect. 
- assert response is None - - -@pytest.mark.asyncio -async def test_cancel_pipeline_job_async_from_dict(): - await test_cancel_pipeline_job_async(request_type=dict) - - -def test_cancel_pipeline_job_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.CancelPipelineJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_pipeline_job), - '__call__') as call: - call.return_value = None - client.cancel_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_cancel_pipeline_job_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.CancelPipelineJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_pipeline_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.cancel_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_cancel_pipeline_job_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.cancel_pipeline_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_cancel_pipeline_job_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.cancel_pipeline_job( - pipeline_service.CancelPipelineJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_cancel_pipeline_job_flattened_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.cancel_pipeline_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_cancel_pipeline_job_flattened_error_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.cancel_pipeline_job( - pipeline_service.CancelPipelineJobRequest(), - name='name_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.PipelineServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.PipelineServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PipelineServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.PipelineServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PipelineServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. 
- transport = transports.PipelineServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = PipelineServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.PipelineServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.PipelineServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.PipelineServiceGrpcTransport, - transports.PipelineServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.PipelineServiceGrpcTransport, - ) - -def test_pipeline_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.PipelineServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_pipeline_service_base_transport(): - # Instantiate the base transport. 
- with mock.patch('google.cloud.aiplatform_v1.services.pipeline_service.transports.PipelineServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.PipelineServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'create_training_pipeline', - 'get_training_pipeline', - 'list_training_pipelines', - 'delete_training_pipeline', - 'cancel_training_pipeline', - 'create_pipeline_job', - 'get_pipeline_job', - 'list_pipeline_jobs', - 'delete_pipeline_job', - 'cancel_pipeline_job', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -def test_pipeline_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.pipeline_service.transports.PipelineServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.PipelineServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_pipeline_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.pipeline_service.transports.PipelineServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.PipelineServiceTransport() - adc.assert_called_once() - - -def test_pipeline_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - PipelineServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.PipelineServiceGrpcTransport, - transports.PipelineServiceGrpcAsyncIOTransport, - ], -) -def test_pipeline_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.PipelineServiceGrpcTransport, grpc_helpers), - (transports.PipelineServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_pipeline_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.PipelineServiceGrpcTransport, transports.PipelineServiceGrpcAsyncIOTransport]) -def test_pipeline_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. 
- with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_pipeline_service_host_no_port(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_pipeline_service_host_with_port(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_pipeline_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.PipelineServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_pipeline_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.PipelineServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.PipelineServiceGrpcTransport, transports.PipelineServiceGrpcAsyncIOTransport]) -def test_pipeline_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from 
grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.PipelineServiceGrpcTransport, transports.PipelineServiceGrpcAsyncIOTransport]) -def test_pipeline_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_pipeline_service_grpc_lro_client(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_pipeline_service_grpc_lro_async_client(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_artifact_path(): - project = "squid" - location = "clam" - metadata_store = "whelk" - artifact = "octopus" - expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, ) - actual = PipelineServiceClient.artifact_path(project, location, metadata_store, artifact) - assert expected == actual - - -def test_parse_artifact_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - "metadata_store": "cuttlefish", - "artifact": "mussel", - } - path = PipelineServiceClient.artifact_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_artifact_path(path) - assert expected == actual - -def test_context_path(): - project = "winkle" - location = "nautilus" - metadata_store = "scallop" - context = "abalone" - expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, ) - actual = PipelineServiceClient.context_path(project, location, metadata_store, context) - assert expected == actual - - -def test_parse_context_path(): - expected = { - "project": "squid", - "location": "clam", - "metadata_store": "whelk", - "context": "octopus", - } - path = PipelineServiceClient.context_path(**expected) - - # Check that the path construction is reversible. 
- actual = PipelineServiceClient.parse_context_path(path) - assert expected == actual - -def test_custom_job_path(): - project = "oyster" - location = "nudibranch" - custom_job = "cuttlefish" - expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) - actual = PipelineServiceClient.custom_job_path(project, location, custom_job) - assert expected == actual - - -def test_parse_custom_job_path(): - expected = { - "project": "mussel", - "location": "winkle", - "custom_job": "nautilus", - } - path = PipelineServiceClient.custom_job_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_custom_job_path(path) - assert expected == actual - -def test_endpoint_path(): - project = "scallop" - location = "abalone" - endpoint = "squid" - expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - actual = PipelineServiceClient.endpoint_path(project, location, endpoint) - assert expected == actual - - -def test_parse_endpoint_path(): - expected = { - "project": "clam", - "location": "whelk", - "endpoint": "octopus", - } - path = PipelineServiceClient.endpoint_path(**expected) - - # Check that the path construction is reversible. 
- actual = PipelineServiceClient.parse_endpoint_path(path) - assert expected == actual - -def test_execution_path(): - project = "oyster" - location = "nudibranch" - metadata_store = "cuttlefish" - execution = "mussel" - expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, location=location, metadata_store=metadata_store, execution=execution, ) - actual = PipelineServiceClient.execution_path(project, location, metadata_store, execution) - assert expected == actual - - -def test_parse_execution_path(): - expected = { - "project": "winkle", - "location": "nautilus", - "metadata_store": "scallop", - "execution": "abalone", - } - path = PipelineServiceClient.execution_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_execution_path(path) - assert expected == actual - -def test_model_path(): - project = "squid" - location = "clam" - model = "whelk" - expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - actual = PipelineServiceClient.model_path(project, location, model) - assert expected == actual - - -def test_parse_model_path(): - expected = { - "project": "octopus", - "location": "oyster", - "model": "nudibranch", - } - path = PipelineServiceClient.model_path(**expected) - - # Check that the path construction is reversible. 
- actual = PipelineServiceClient.parse_model_path(path) - assert expected == actual - -def test_network_path(): - project = "cuttlefish" - network = "mussel" - expected = "projects/{project}/global/networks/{network}".format(project=project, network=network, ) - actual = PipelineServiceClient.network_path(project, network) - assert expected == actual - - -def test_parse_network_path(): - expected = { - "project": "winkle", - "network": "nautilus", - } - path = PipelineServiceClient.network_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_network_path(path) - assert expected == actual - -def test_pipeline_job_path(): - project = "scallop" - location = "abalone" - pipeline_job = "squid" - expected = "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format(project=project, location=location, pipeline_job=pipeline_job, ) - actual = PipelineServiceClient.pipeline_job_path(project, location, pipeline_job) - assert expected == actual - - -def test_parse_pipeline_job_path(): - expected = { - "project": "clam", - "location": "whelk", - "pipeline_job": "octopus", - } - path = PipelineServiceClient.pipeline_job_path(**expected) - - # Check that the path construction is reversible. 
- actual = PipelineServiceClient.parse_pipeline_job_path(path) - assert expected == actual - -def test_training_pipeline_path(): - project = "oyster" - location = "nudibranch" - training_pipeline = "cuttlefish" - expected = "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, ) - actual = PipelineServiceClient.training_pipeline_path(project, location, training_pipeline) - assert expected == actual - - -def test_parse_training_pipeline_path(): - expected = { - "project": "mussel", - "location": "winkle", - "training_pipeline": "nautilus", - } - path = PipelineServiceClient.training_pipeline_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_training_pipeline_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "scallop" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = PipelineServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "abalone", - } - path = PipelineServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "squid" - expected = "folders/{folder}".format(folder=folder, ) - actual = PipelineServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "clam", - } - path = PipelineServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. 
- actual = PipelineServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "whelk" - expected = "organizations/{organization}".format(organization=organization, ) - actual = PipelineServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "octopus", - } - path = PipelineServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "oyster" - expected = "projects/{project}".format(project=project, ) - actual = PipelineServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "nudibranch", - } - path = PipelineServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "cuttlefish" - location = "mussel" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = PipelineServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "winkle", - "location": "nautilus", - } - path = PipelineServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = PipelineServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.PipelineServiceTransport, '_prep_wrapped_messages') as prep: - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.PipelineServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = PipelineServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_prediction_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_prediction_service.py deleted file mode 100644 index 2646a84623..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_prediction_service.py +++ /dev/null @@ -1,1734 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api import httpbody_pb2 # type: ignore -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1.services.prediction_service import PredictionServiceAsyncClient -from google.cloud.aiplatform_v1.services.prediction_service import PredictionServiceClient -from google.cloud.aiplatform_v1.services.prediction_service import transports -from google.cloud.aiplatform_v1.types import explanation -from google.cloud.aiplatform_v1.types import prediction_service -from google.oauth2 import service_account -from google.protobuf import any_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert PredictionServiceClient._get_default_mtls_endpoint(None) is None - assert PredictionServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert PredictionServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert PredictionServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert PredictionServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert PredictionServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - PredictionServiceClient, - PredictionServiceAsyncClient, -]) -def test_prediction_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.PredictionServiceGrpcTransport, "grpc"), - (transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_prediction_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', 
create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - PredictionServiceClient, - PredictionServiceAsyncClient, -]) -def test_prediction_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_prediction_service_client_get_transport_class(): - transport = PredictionServiceClient.get_transport_class() - available_transports = [ - transports.PredictionServiceGrpcTransport, - ] - assert transport in available_transports - - transport = PredictionServiceClient.get_transport_class("grpc") - assert transport == transports.PredictionServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) 
-@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) -def test_prediction_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(PredictionServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(PredictionServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", "true"), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", "false"), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) -@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_prediction_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_prediction_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_prediction_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_prediction_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1.services.prediction_service.transports.PredictionServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = PredictionServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_predict(transport: str = 'grpc', request_type=prediction_service.PredictRequest): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = prediction_service.PredictResponse( - deployed_model_id='deployed_model_id_value', - model='model_value', - model_display_name='model_display_name_value', - ) - response = client.predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.PredictRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, prediction_service.PredictResponse) - assert response.deployed_model_id == 'deployed_model_id_value' - assert response.model == 'model_value' - assert response.model_display_name == 'model_display_name_value' - - -def test_predict_from_dict(): - test_predict(request_type=dict) - - -def test_predict_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - client.predict() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.PredictRequest() - - -@pytest.mark.asyncio -async def test_predict_async(transport: str = 'grpc_asyncio', request_type=prediction_service.PredictRequest): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse( - deployed_model_id='deployed_model_id_value', - model='model_value', - model_display_name='model_display_name_value', - )) - response = await client.predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.PredictRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, prediction_service.PredictResponse) - assert response.deployed_model_id == 'deployed_model_id_value' - assert response.model == 'model_value' - assert response.model_display_name == 'model_display_name_value' - - -@pytest.mark.asyncio -async def test_predict_async_from_dict(): - await test_predict_async(request_type=dict) - - -def test_predict_field_headers(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = prediction_service.PredictRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - call.return_value = prediction_service.PredictResponse() - client.predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_predict_field_headers_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = prediction_service.PredictRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse()) - await client.predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -def test_predict_flattened(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = prediction_service.PredictResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.predict( - endpoint='endpoint_value', - instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], - parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].endpoint - mock_val = 'endpoint_value' - assert arg == mock_val - arg = args[0].instances - mock_val = [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)] - assert arg == mock_val - arg = args[0].parameters - mock_val = struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE) - from proto.marshal import Marshal - from proto.marshal.rules.struct import ValueRule - rule = ValueRule(marshal=Marshal(name="Test")) - mock_val = rule.to_python(mock_val) - assert arg == mock_val - - -def test_predict_flattened_error(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.predict( - prediction_service.PredictRequest(), - endpoint='endpoint_value', - instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], - parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), - ) - - -@pytest.mark.asyncio -async def test_predict_flattened_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = prediction_service.PredictResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.predict( - endpoint='endpoint_value', - instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], - parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].endpoint - mock_val = 'endpoint_value' - assert arg == mock_val - arg = args[0].instances - mock_val = [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)] - assert arg == mock_val - arg = args[0].parameters - mock_val = struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE) - from proto.marshal import Marshal - from proto.marshal.rules.struct import ValueRule - rule = ValueRule(marshal=Marshal(name="Test")) - mock_val = rule.to_python(mock_val) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_predict_flattened_error_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.predict( - prediction_service.PredictRequest(), - endpoint='endpoint_value', - instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], - parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), - ) - - -def test_raw_predict(transport: str = 'grpc', request_type=prediction_service.RawPredictRequest): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.raw_predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = httpbody_pb2.HttpBody( - content_type='content_type_value', - data=b'data_blob', - ) - response = client.raw_predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.RawPredictRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, httpbody_pb2.HttpBody) - assert response.content_type == 'content_type_value' - assert response.data == b'data_blob' - - -def test_raw_predict_from_dict(): - test_raw_predict(request_type=dict) - - -def test_raw_predict_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.raw_predict), - '__call__') as call: - client.raw_predict() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.RawPredictRequest() - - -@pytest.mark.asyncio -async def test_raw_predict_async(transport: str = 'grpc_asyncio', request_type=prediction_service.RawPredictRequest): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.raw_predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(httpbody_pb2.HttpBody( - content_type='content_type_value', - data=b'data_blob', - )) - response = await client.raw_predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.RawPredictRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, httpbody_pb2.HttpBody) - assert response.content_type == 'content_type_value' - assert response.data == b'data_blob' - - -@pytest.mark.asyncio -async def test_raw_predict_async_from_dict(): - await test_raw_predict_async(request_type=dict) - - -def test_raw_predict_field_headers(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = prediction_service.RawPredictRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.raw_predict), - '__call__') as call: - call.return_value = httpbody_pb2.HttpBody() - client.raw_predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_raw_predict_field_headers_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = prediction_service.RawPredictRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.raw_predict), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(httpbody_pb2.HttpBody()) - await client.raw_predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -def test_raw_predict_flattened(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.raw_predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = httpbody_pb2.HttpBody() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.raw_predict( - endpoint='endpoint_value', - http_body=httpbody_pb2.HttpBody(content_type='content_type_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].endpoint - mock_val = 'endpoint_value' - assert arg == mock_val - arg = args[0].http_body - mock_val = httpbody_pb2.HttpBody(content_type='content_type_value') - assert arg == mock_val - - -def test_raw_predict_flattened_error(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.raw_predict( - prediction_service.RawPredictRequest(), - endpoint='endpoint_value', - http_body=httpbody_pb2.HttpBody(content_type='content_type_value'), - ) - - -@pytest.mark.asyncio -async def test_raw_predict_flattened_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.raw_predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = httpbody_pb2.HttpBody() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(httpbody_pb2.HttpBody()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.raw_predict( - endpoint='endpoint_value', - http_body=httpbody_pb2.HttpBody(content_type='content_type_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].endpoint - mock_val = 'endpoint_value' - assert arg == mock_val - arg = args[0].http_body - mock_val = httpbody_pb2.HttpBody(content_type='content_type_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_raw_predict_flattened_error_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.raw_predict( - prediction_service.RawPredictRequest(), - endpoint='endpoint_value', - http_body=httpbody_pb2.HttpBody(content_type='content_type_value'), - ) - - -def test_explain(transport: str = 'grpc', request_type=prediction_service.ExplainRequest): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.explain), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = prediction_service.ExplainResponse( - deployed_model_id='deployed_model_id_value', - ) - response = client.explain(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.ExplainRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, prediction_service.ExplainResponse) - assert response.deployed_model_id == 'deployed_model_id_value' - - -def test_explain_from_dict(): - test_explain(request_type=dict) - - -def test_explain_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.explain), - '__call__') as call: - client.explain() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.ExplainRequest() - - -@pytest.mark.asyncio -async def test_explain_async(transport: str = 'grpc_asyncio', request_type=prediction_service.ExplainRequest): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.explain), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.ExplainResponse( - deployed_model_id='deployed_model_id_value', - )) - response = await client.explain(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.ExplainRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, prediction_service.ExplainResponse) - assert response.deployed_model_id == 'deployed_model_id_value' - - -@pytest.mark.asyncio -async def test_explain_async_from_dict(): - await test_explain_async(request_type=dict) - - -def test_explain_field_headers(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = prediction_service.ExplainRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.explain), - '__call__') as call: - call.return_value = prediction_service.ExplainResponse() - client.explain(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_explain_field_headers_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = prediction_service.ExplainRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.explain), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.ExplainResponse()) - await client.explain(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -def test_explain_flattened(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.explain), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = prediction_service.ExplainResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.explain( - endpoint='endpoint_value', - instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], - parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), - deployed_model_id='deployed_model_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].endpoint - mock_val = 'endpoint_value' - assert arg == mock_val - arg = args[0].instances - mock_val = [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)] - assert arg == mock_val - arg = args[0].parameters - mock_val = struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE) - from proto.marshal import Marshal - from proto.marshal.rules.struct import ValueRule - rule = ValueRule(marshal=Marshal(name="Test")) - mock_val = rule.to_python(mock_val) - assert arg == mock_val - arg = args[0].deployed_model_id - mock_val = 'deployed_model_id_value' - assert arg == mock_val - - -def test_explain_flattened_error(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.explain( - prediction_service.ExplainRequest(), - endpoint='endpoint_value', - instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], - parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), - deployed_model_id='deployed_model_id_value', - ) - - -@pytest.mark.asyncio -async def test_explain_flattened_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.explain), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = prediction_service.ExplainResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.ExplainResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.explain( - endpoint='endpoint_value', - instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], - parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), - deployed_model_id='deployed_model_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].endpoint - mock_val = 'endpoint_value' - assert arg == mock_val - arg = args[0].instances - mock_val = [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)] - assert arg == mock_val - arg = args[0].parameters - mock_val = struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE) - from proto.marshal import Marshal - from proto.marshal.rules.struct import ValueRule - rule = ValueRule(marshal=Marshal(name="Test")) - mock_val = rule.to_python(mock_val) - assert arg == mock_val - arg = args[0].deployed_model_id - mock_val = 'deployed_model_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_explain_flattened_error_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.explain( - prediction_service.ExplainRequest(), - endpoint='endpoint_value', - instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], - parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), - deployed_model_id='deployed_model_id_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. 
- transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PredictionServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PredictionServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = PredictionServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.PredictionServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.PredictionServiceGrpcTransport, - transports.PredictionServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. 
- with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.PredictionServiceGrpcTransport, - ) - -def test_prediction_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.PredictionServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_prediction_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1.services.prediction_service.transports.PredictionServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.PredictionServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
- methods = ( - 'predict', - 'raw_predict', - 'explain', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - -def test_prediction_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.PredictionServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_prediction_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.PredictionServiceTransport() - adc.assert_called_once() - - -def test_prediction_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - PredictionServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.PredictionServiceGrpcTransport, - transports.PredictionServiceGrpcAsyncIOTransport, - ], -) -def test_prediction_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.PredictionServiceGrpcTransport, grpc_helpers), - (transports.PredictionServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_prediction_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) -def test_prediction_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. 
- with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_prediction_service_host_no_port(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_prediction_service_host_with_port(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_prediction_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.PredictionServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_prediction_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.PredictionServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) -def test_prediction_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed 
from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) -def test_prediction_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_endpoint_path(): - project = "squid" - location = "clam" - endpoint = "whelk" - expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - actual = PredictionServiceClient.endpoint_path(project, location, endpoint) - assert expected == actual - - -def test_parse_endpoint_path(): - expected = { - "project": "octopus", - "location": "oyster", - "endpoint": "nudibranch", - } - path = PredictionServiceClient.endpoint_path(**expected) - - # Check that the path construction is reversible. 
- actual = PredictionServiceClient.parse_endpoint_path(path) - assert expected == actual - -def test_model_path(): - project = "cuttlefish" - location = "mussel" - model = "winkle" - expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - actual = PredictionServiceClient.model_path(project, location, model) - assert expected == actual - - -def test_parse_model_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "model": "abalone", - } - path = PredictionServiceClient.model_path(**expected) - - # Check that the path construction is reversible. - actual = PredictionServiceClient.parse_model_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "squid" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = PredictionServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "clam", - } - path = PredictionServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = PredictionServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "whelk" - expected = "folders/{folder}".format(folder=folder, ) - actual = PredictionServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "octopus", - } - path = PredictionServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. 
- actual = PredictionServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "oyster" - expected = "organizations/{organization}".format(organization=organization, ) - actual = PredictionServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nudibranch", - } - path = PredictionServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = PredictionServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "cuttlefish" - expected = "projects/{project}".format(project=project, ) - actual = PredictionServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "mussel", - } - path = PredictionServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = PredictionServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "winkle" - location = "nautilus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = PredictionServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "scallop", - "location": "abalone", - } - path = PredictionServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = PredictionServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.PredictionServiceTransport, '_prep_wrapped_messages') as prep: - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.PredictionServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = PredictionServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py deleted file mode 100644 index c29f284776..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_specialist_pool_service.py +++ /dev/null @@ -1,2361 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1.services.specialist_pool_service import SpecialistPoolServiceAsyncClient -from google.cloud.aiplatform_v1.services.specialist_pool_service import SpecialistPoolServiceClient -from google.cloud.aiplatform_v1.services.specialist_pool_service import pagers -from google.cloud.aiplatform_v1.services.specialist_pool_service import transports -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.cloud.aiplatform_v1.types import specialist_pool -from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool -from google.cloud.aiplatform_v1.types import specialist_pool_service -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert SpecialistPoolServiceClient._get_default_mtls_endpoint(None) is None - assert SpecialistPoolServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert SpecialistPoolServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert SpecialistPoolServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert SpecialistPoolServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert SpecialistPoolServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - SpecialistPoolServiceClient, - SpecialistPoolServiceAsyncClient, -]) -def test_specialist_pool_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.SpecialistPoolServiceGrpcTransport, "grpc"), - (transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_specialist_pool_service_client_service_account_always_use_jwt(transport_class, transport_name): - with 
mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - SpecialistPoolServiceClient, - SpecialistPoolServiceAsyncClient, -]) -def test_specialist_pool_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_specialist_pool_service_client_get_transport_class(): - transport = SpecialistPoolServiceClient.get_transport_class() - available_transports = [ - transports.SpecialistPoolServiceGrpcTransport, - ] - assert transport in available_transports - - transport = SpecialistPoolServiceClient.get_transport_class("grpc") - assert transport == transports.SpecialistPoolServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc"), - (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) 
-@mock.patch.object(SpecialistPoolServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceClient)) -@mock.patch.object(SpecialistPoolServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceAsyncClient)) -def test_specialist_pool_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(SpecialistPoolServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(SpecialistPoolServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc", "true"), - (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc", "false"), - (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(SpecialistPoolServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceClient)) -@mock.patch.object(SpecialistPoolServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_specialist_pool_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. 
Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc"), - (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_specialist_pool_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. 
- options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc"), - (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_specialist_pool_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. - options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_specialist_pool_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1.services.specialist_pool_service.transports.SpecialistPoolServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = SpecialistPoolServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - 
client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_create_specialist_pool(transport: str = 'grpc', request_type=specialist_pool_service.CreateSpecialistPoolRequest): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.CreateSpecialistPoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_specialist_pool_from_dict(): - test_create_specialist_pool(request_type=dict) - - -def test_create_specialist_pool_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - client.create_specialist_pool() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.CreateSpecialistPoolRequest() - - -@pytest.mark.asyncio -async def test_create_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.CreateSpecialistPoolRequest): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.CreateSpecialistPoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_specialist_pool_async_from_dict(): - await test_create_specialist_pool_async(request_type=dict) - - -def test_create_specialist_pool_field_headers(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = specialist_pool_service.CreateSpecialistPoolRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_specialist_pool_field_headers_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.CreateSpecialistPoolRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_specialist_pool_flattened(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_specialist_pool( - parent='parent_value', - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].specialist_pool - mock_val = gca_specialist_pool.SpecialistPool(name='name_value') - assert arg == mock_val - - -def test_create_specialist_pool_flattened_error(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_specialist_pool( - specialist_pool_service.CreateSpecialistPoolRequest(), - parent='parent_value', - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_specialist_pool_flattened_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_specialist_pool( - parent='parent_value', - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].specialist_pool - mock_val = gca_specialist_pool.SpecialistPool(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_specialist_pool_flattened_error_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_specialist_pool( - specialist_pool_service.CreateSpecialistPoolRequest(), - parent='parent_value', - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - ) - - -def test_get_specialist_pool(transport: str = 'grpc', request_type=specialist_pool_service.GetSpecialistPoolRequest): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = specialist_pool.SpecialistPool( - name='name_value', - display_name='display_name_value', - specialist_managers_count=2662, - specialist_manager_emails=['specialist_manager_emails_value'], - pending_data_labeling_jobs=['pending_data_labeling_jobs_value'], - specialist_worker_emails=['specialist_worker_emails_value'], - ) - response = client.get_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.GetSpecialistPoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, specialist_pool.SpecialistPool) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.specialist_managers_count == 2662 - assert response.specialist_manager_emails == ['specialist_manager_emails_value'] - assert response.pending_data_labeling_jobs == ['pending_data_labeling_jobs_value'] - assert response.specialist_worker_emails == ['specialist_worker_emails_value'] - - -def test_get_specialist_pool_from_dict(): - test_get_specialist_pool(request_type=dict) - - -def test_get_specialist_pool_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_specialist_pool), - '__call__') as call: - client.get_specialist_pool() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.GetSpecialistPoolRequest() - - -@pytest.mark.asyncio -async def test_get_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.GetSpecialistPoolRequest): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool.SpecialistPool( - name='name_value', - display_name='display_name_value', - specialist_managers_count=2662, - specialist_manager_emails=['specialist_manager_emails_value'], - pending_data_labeling_jobs=['pending_data_labeling_jobs_value'], - specialist_worker_emails=['specialist_worker_emails_value'], - )) - response = await client.get_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.GetSpecialistPoolRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, specialist_pool.SpecialistPool) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.specialist_managers_count == 2662 - assert response.specialist_manager_emails == ['specialist_manager_emails_value'] - assert response.pending_data_labeling_jobs == ['pending_data_labeling_jobs_value'] - assert response.specialist_worker_emails == ['specialist_worker_emails_value'] - - -@pytest.mark.asyncio -async def test_get_specialist_pool_async_from_dict(): - await test_get_specialist_pool_async(request_type=dict) - - -def test_get_specialist_pool_field_headers(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.GetSpecialistPoolRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_specialist_pool), - '__call__') as call: - call.return_value = specialist_pool.SpecialistPool() - client.get_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_specialist_pool_field_headers_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = specialist_pool_service.GetSpecialistPoolRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_specialist_pool), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool.SpecialistPool()) - await client.get_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_specialist_pool_flattened(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = specialist_pool.SpecialistPool() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_specialist_pool( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_specialist_pool_flattened_error(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_specialist_pool( - specialist_pool_service.GetSpecialistPoolRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_specialist_pool_flattened_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = specialist_pool.SpecialistPool() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool.SpecialistPool()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_specialist_pool( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_specialist_pool_flattened_error_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_specialist_pool( - specialist_pool_service.GetSpecialistPoolRequest(), - name='name_value', - ) - - -def test_list_specialist_pools(transport: str = 'grpc', request_type=specialist_pool_service.ListSpecialistPoolsRequest): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = specialist_pool_service.ListSpecialistPoolsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_specialist_pools(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.ListSpecialistPoolsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListSpecialistPoolsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_specialist_pools_from_dict(): - test_list_specialist_pools(request_type=dict) - - -def test_list_specialist_pools_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - client.list_specialist_pools() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.ListSpecialistPoolsRequest() - - -@pytest.mark.asyncio -async def test_list_specialist_pools_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.ListSpecialistPoolsRequest): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool_service.ListSpecialistPoolsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_specialist_pools(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.ListSpecialistPoolsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListSpecialistPoolsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_specialist_pools_async_from_dict(): - await test_list_specialist_pools_async(request_type=dict) - - -def test_list_specialist_pools_field_headers(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.ListSpecialistPoolsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - call.return_value = specialist_pool_service.ListSpecialistPoolsResponse() - client.list_specialist_pools(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_specialist_pools_field_headers_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.ListSpecialistPoolsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool_service.ListSpecialistPoolsResponse()) - await client.list_specialist_pools(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_specialist_pools_flattened(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = specialist_pool_service.ListSpecialistPoolsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_specialist_pools( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_specialist_pools_flattened_error(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_specialist_pools( - specialist_pool_service.ListSpecialistPoolsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_specialist_pools_flattened_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = specialist_pool_service.ListSpecialistPoolsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool_service.ListSpecialistPoolsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_specialist_pools( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_specialist_pools_flattened_error_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.list_specialist_pools( - specialist_pool_service.ListSpecialistPoolsRequest(), - parent='parent_value', - ) - - -def test_list_specialist_pools_pager(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - ], - next_page_token='abc', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[], - next_page_token='def', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - ], - next_page_token='ghi', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_specialist_pools(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, specialist_pool.SpecialistPool) - for i in results) - -def test_list_specialist_pools_pages(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - ], - next_page_token='abc', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[], - next_page_token='def', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - ], - next_page_token='ghi', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - ], - ), - RuntimeError, - ) - pages = list(client.list_specialist_pools(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_specialist_pools_async_pager(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - ], - next_page_token='abc', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[], - next_page_token='def', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - ], - next_page_token='ghi', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_specialist_pools(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, specialist_pool.SpecialistPool) - for i in responses) - -@pytest.mark.asyncio -async def test_list_specialist_pools_async_pages(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - ], - next_page_token='abc', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[], - next_page_token='def', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - ], - next_page_token='ghi', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_specialist_pools(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_specialist_pool(transport: str = 'grpc', request_type=specialist_pool_service.DeleteSpecialistPoolRequest): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.DeleteSpecialistPoolRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_delete_specialist_pool_from_dict(): - test_delete_specialist_pool(request_type=dict) - - -def test_delete_specialist_pool_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_specialist_pool), - '__call__') as call: - client.delete_specialist_pool() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.DeleteSpecialistPoolRequest() - - -@pytest.mark.asyncio -async def test_delete_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.DeleteSpecialistPoolRequest): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.DeleteSpecialistPoolRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_specialist_pool_async_from_dict(): - await test_delete_specialist_pool_async(request_type=dict) - - -def test_delete_specialist_pool_field_headers(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.DeleteSpecialistPoolRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_specialist_pool), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_specialist_pool_field_headers_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.DeleteSpecialistPoolRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_specialist_pool), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_specialist_pool_flattened(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_specialist_pool( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_specialist_pool_flattened_error(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_specialist_pool( - specialist_pool_service.DeleteSpecialistPoolRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_specialist_pool_flattened_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_specialist_pool( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_specialist_pool_flattened_error_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_specialist_pool( - specialist_pool_service.DeleteSpecialistPoolRequest(), - name='name_value', - ) - - -def test_update_specialist_pool(transport: str = 'grpc', request_type=specialist_pool_service.UpdateSpecialistPoolRequest): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.update_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.UpdateSpecialistPoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_update_specialist_pool_from_dict(): - test_update_specialist_pool(request_type=dict) - - -def test_update_specialist_pool_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - client.update_specialist_pool() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.UpdateSpecialistPoolRequest() - - -@pytest.mark.asyncio -async def test_update_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.UpdateSpecialistPoolRequest): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.update_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.UpdateSpecialistPoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_update_specialist_pool_async_from_dict(): - await test_update_specialist_pool_async(request_type=dict) - - -def test_update_specialist_pool_field_headers(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.UpdateSpecialistPoolRequest() - - request.specialist_pool.name = 'specialist_pool.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.update_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'specialist_pool.name=specialist_pool.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_specialist_pool_field_headers_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.UpdateSpecialistPoolRequest() - - request.specialist_pool.name = 'specialist_pool.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.update_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'specialist_pool.name=specialist_pool.name/value', - ) in kw['metadata'] - - -def test_update_specialist_pool_flattened(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_specialist_pool( - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].specialist_pool - mock_val = gca_specialist_pool.SpecialistPool(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_specialist_pool_flattened_error(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_specialist_pool( - specialist_pool_service.UpdateSpecialistPoolRequest(), - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_specialist_pool_flattened_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_specialist_pool( - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].specialist_pool - mock_val = gca_specialist_pool.SpecialistPool(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_specialist_pool_flattened_error_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.update_specialist_pool( - specialist_pool_service.UpdateSpecialistPoolRequest(), - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.SpecialistPoolServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.SpecialistPoolServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = SpecialistPoolServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.SpecialistPoolServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = SpecialistPoolServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.SpecialistPoolServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = SpecialistPoolServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. 
- transport = transports.SpecialistPoolServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.SpecialistPoolServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.SpecialistPoolServiceGrpcTransport, - transports.SpecialistPoolServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.SpecialistPoolServiceGrpcTransport, - ) - -def test_specialist_pool_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.SpecialistPoolServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_specialist_pool_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.SpecialistPoolServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
- methods = ( - 'create_specialist_pool', - 'get_specialist_pool', - 'list_specialist_pools', - 'delete_specialist_pool', - 'update_specialist_pool', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -def test_specialist_pool_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.SpecialistPoolServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_specialist_pool_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.SpecialistPoolServiceTransport() - adc.assert_called_once() - - -def test_specialist_pool_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - SpecialistPoolServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.SpecialistPoolServiceGrpcTransport, - transports.SpecialistPoolServiceGrpcAsyncIOTransport, - ], -) -def test_specialist_pool_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.SpecialistPoolServiceGrpcTransport, grpc_helpers), - (transports.SpecialistPoolServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_specialist_pool_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.SpecialistPoolServiceGrpcTransport, transports.SpecialistPoolServiceGrpcAsyncIOTransport]) -def test_specialist_pool_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. 
- with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_specialist_pool_service_host_no_port(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_specialist_pool_service_host_with_port(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_specialist_pool_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.SpecialistPoolServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_specialist_pool_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.SpecialistPoolServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.SpecialistPoolServiceGrpcTransport, transports.SpecialistPoolServiceGrpcAsyncIOTransport]) -def test_specialist_pool_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) 
are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.SpecialistPoolServiceGrpcTransport, transports.SpecialistPoolServiceGrpcAsyncIOTransport]) -def test_specialist_pool_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_specialist_pool_service_grpc_lro_client(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. 
- assert transport.operations_client is transport.operations_client - - -def test_specialist_pool_service_grpc_lro_async_client(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_specialist_pool_path(): - project = "squid" - location = "clam" - specialist_pool = "whelk" - expected = "projects/{project}/locations/{location}/specialistPools/{specialist_pool}".format(project=project, location=location, specialist_pool=specialist_pool, ) - actual = SpecialistPoolServiceClient.specialist_pool_path(project, location, specialist_pool) - assert expected == actual - - -def test_parse_specialist_pool_path(): - expected = { - "project": "octopus", - "location": "oyster", - "specialist_pool": "nudibranch", - } - path = SpecialistPoolServiceClient.specialist_pool_path(**expected) - - # Check that the path construction is reversible. - actual = SpecialistPoolServiceClient.parse_specialist_pool_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "cuttlefish" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = SpecialistPoolServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "mussel", - } - path = SpecialistPoolServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. 
- actual = SpecialistPoolServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "winkle" - expected = "folders/{folder}".format(folder=folder, ) - actual = SpecialistPoolServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "nautilus", - } - path = SpecialistPoolServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = SpecialistPoolServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "scallop" - expected = "organizations/{organization}".format(organization=organization, ) - actual = SpecialistPoolServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "abalone", - } - path = SpecialistPoolServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = SpecialistPoolServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "squid" - expected = "projects/{project}".format(project=project, ) - actual = SpecialistPoolServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "clam", - } - path = SpecialistPoolServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. 
- actual = SpecialistPoolServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "whelk" - location = "octopus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = SpecialistPoolServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - } - path = SpecialistPoolServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. - actual = SpecialistPoolServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.SpecialistPoolServiceTransport, '_prep_wrapped_messages') as prep: - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.SpecialistPoolServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = SpecialistPoolServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = SpecialistPoolServiceClient( - 
credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. - with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py deleted file mode 100644 index d90d8c03cd..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_tensorboard_service.py +++ /dev/null @@ -1,8865 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1.services.tensorboard_service import TensorboardServiceAsyncClient -from google.cloud.aiplatform_v1.services.tensorboard_service import TensorboardServiceClient -from google.cloud.aiplatform_v1.services.tensorboard_service import pagers -from google.cloud.aiplatform_v1.services.tensorboard_service import transports -from google.cloud.aiplatform_v1.types import encryption_spec -from google.cloud.aiplatform_v1.types import operation as gca_operation -from google.cloud.aiplatform_v1.types import tensorboard -from google.cloud.aiplatform_v1.types import tensorboard as gca_tensorboard -from google.cloud.aiplatform_v1.types import tensorboard_data -from google.cloud.aiplatform_v1.types import tensorboard_experiment -from google.cloud.aiplatform_v1.types import tensorboard_experiment as gca_tensorboard_experiment -from google.cloud.aiplatform_v1.types import tensorboard_run -from google.cloud.aiplatform_v1.types import tensorboard_run as gca_tensorboard_run -from google.cloud.aiplatform_v1.types import tensorboard_service -from google.cloud.aiplatform_v1.types import tensorboard_time_series -from google.cloud.aiplatform_v1.types import tensorboard_time_series as gca_tensorboard_time_series -from google.longrunning import operations_pb2 -from 
google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert TensorboardServiceClient._get_default_mtls_endpoint(None) is None - assert TensorboardServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert TensorboardServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert TensorboardServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert TensorboardServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert TensorboardServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - TensorboardServiceClient, - TensorboardServiceAsyncClient, -]) -def test_tensorboard_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert 
isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.TensorboardServiceGrpcTransport, "grpc"), - (transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_tensorboard_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - TensorboardServiceClient, - TensorboardServiceAsyncClient, -]) -def test_tensorboard_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_tensorboard_service_client_get_transport_class(): - transport = TensorboardServiceClient.get_transport_class() - available_transports = [ - transports.TensorboardServiceGrpcTransport, - ] - assert transport in available_transports - - transport = 
TensorboardServiceClient.get_transport_class("grpc") - assert transport == transports.TensorboardServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc"), - (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(TensorboardServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceClient)) -@mock.patch.object(TensorboardServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceAsyncClient)) -def test_tensorboard_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(TensorboardServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(TensorboardServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc", "true"), - (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc", "false"), - (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(TensorboardServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceClient)) -@mock.patch.object(TensorboardServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_tensorboard_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc"), - (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_tensorboard_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc"), - (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_tensorboard_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_tensorboard_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1.services.tensorboard_service.transports.TensorboardServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = TensorboardServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_create_tensorboard(transport: str = 'grpc', request_type=tensorboard_service.CreateTensorboardRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_tensorboard_from_dict(): - test_create_tensorboard(request_type=dict) - - -def test_create_tensorboard_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard), - '__call__') as call: - client.create_tensorboard() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardRequest() - - -@pytest.mark.asyncio -async def test_create_tensorboard_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.CreateTensorboardRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_tensorboard_async_from_dict(): - await test_create_tensorboard_async(request_type=dict) - - -def test_create_tensorboard_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.CreateTensorboardRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_tensorboard_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.CreateTensorboardRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_tensorboard), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_tensorboard_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_tensorboard( - parent='parent_value', - tensorboard=gca_tensorboard.Tensorboard(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].tensorboard - mock_val = gca_tensorboard.Tensorboard(name='name_value') - assert arg == mock_val - - -def test_create_tensorboard_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_tensorboard( - tensorboard_service.CreateTensorboardRequest(), - parent='parent_value', - tensorboard=gca_tensorboard.Tensorboard(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_tensorboard_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_tensorboard( - parent='parent_value', - tensorboard=gca_tensorboard.Tensorboard(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].tensorboard - mock_val = gca_tensorboard.Tensorboard(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_tensorboard_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_tensorboard( - tensorboard_service.CreateTensorboardRequest(), - parent='parent_value', - tensorboard=gca_tensorboard.Tensorboard(name='name_value'), - ) - - -def test_get_tensorboard(transport: str = 'grpc', request_type=tensorboard_service.GetTensorboardRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard.Tensorboard( - name='name_value', - display_name='display_name_value', - description='description_value', - blob_storage_path_prefix='blob_storage_path_prefix_value', - run_count=989, - etag='etag_value', - ) - response = client.get_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard.Tensorboard) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.blob_storage_path_prefix == 'blob_storage_path_prefix_value' - assert response.run_count == 989 - assert response.etag == 'etag_value' - - -def test_get_tensorboard_from_dict(): - test_get_tensorboard(request_type=dict) - - -def test_get_tensorboard_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard), - '__call__') as call: - client.get_tensorboard() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardRequest() - - -@pytest.mark.asyncio -async def test_get_tensorboard_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.GetTensorboardRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard.Tensorboard( - name='name_value', - display_name='display_name_value', - description='description_value', - blob_storage_path_prefix='blob_storage_path_prefix_value', - run_count=989, - etag='etag_value', - )) - response = await client.get_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, tensorboard.Tensorboard) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.blob_storage_path_prefix == 'blob_storage_path_prefix_value' - assert response.run_count == 989 - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_tensorboard_async_from_dict(): - await test_get_tensorboard_async(request_type=dict) - - -def test_get_tensorboard_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.GetTensorboardRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard), - '__call__') as call: - call.return_value = tensorboard.Tensorboard() - client.get_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_tensorboard_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.GetTensorboardRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_tensorboard), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard.Tensorboard()) - await client.get_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_tensorboard_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard.Tensorboard() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_tensorboard( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_tensorboard_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_tensorboard( - tensorboard_service.GetTensorboardRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_tensorboard_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard.Tensorboard() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard.Tensorboard()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_tensorboard( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_tensorboard_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_tensorboard( - tensorboard_service.GetTensorboardRequest(), - name='name_value', - ) - - -def test_update_tensorboard(transport: str = 'grpc', request_type=tensorboard_service.UpdateTensorboardRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.update_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_update_tensorboard_from_dict(): - test_update_tensorboard(request_type=dict) - - -def test_update_tensorboard_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard), - '__call__') as call: - client.update_tensorboard() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardRequest() - - -@pytest.mark.asyncio -async def test_update_tensorboard_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.UpdateTensorboardRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.update_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_update_tensorboard_async_from_dict(): - await test_update_tensorboard_async(request_type=dict) - - -def test_update_tensorboard_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.UpdateTensorboardRequest() - - request.tensorboard.name = 'tensorboard.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.update_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard.name=tensorboard.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_tensorboard_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.UpdateTensorboardRequest() - - request.tensorboard.name = 'tensorboard.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_tensorboard), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.update_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard.name=tensorboard.name/value', - ) in kw['metadata'] - - -def test_update_tensorboard_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_tensorboard( - tensorboard=gca_tensorboard.Tensorboard(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard - mock_val = gca_tensorboard.Tensorboard(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_tensorboard_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_tensorboard( - tensorboard_service.UpdateTensorboardRequest(), - tensorboard=gca_tensorboard.Tensorboard(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_tensorboard_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_tensorboard( - tensorboard=gca_tensorboard.Tensorboard(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard - mock_val = gca_tensorboard.Tensorboard(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_tensorboard_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.update_tensorboard( - tensorboard_service.UpdateTensorboardRequest(), - tensorboard=gca_tensorboard.Tensorboard(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_list_tensorboards(transport: str = 'grpc', request_type=tensorboard_service.ListTensorboardsRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboards), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ListTensorboardsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_tensorboards(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ListTensorboardsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTensorboardsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_tensorboards_from_dict(): - test_list_tensorboards(request_type=dict) - - -def test_list_tensorboards_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_tensorboards), - '__call__') as call: - client.list_tensorboards() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ListTensorboardsRequest() - - -@pytest.mark.asyncio -async def test_list_tensorboards_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ListTensorboardsRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboards), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_tensorboards(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ListTensorboardsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTensorboardsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_tensorboards_async_from_dict(): - await test_list_tensorboards_async(request_type=dict) - - -def test_list_tensorboards_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = tensorboard_service.ListTensorboardsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboards), - '__call__') as call: - call.return_value = tensorboard_service.ListTensorboardsResponse() - client.list_tensorboards(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_tensorboards_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ListTensorboardsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboards), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardsResponse()) - await client.list_tensorboards(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_tensorboards_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_tensorboards), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ListTensorboardsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_tensorboards( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_tensorboards_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_tensorboards( - tensorboard_service.ListTensorboardsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_tensorboards_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboards), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ListTensorboardsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_tensorboards( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_tensorboards_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_tensorboards( - tensorboard_service.ListTensorboardsRequest(), - parent='parent_value', - ) - - -def test_list_tensorboards_pager(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboards), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - tensorboard_service.ListTensorboardsResponse( - tensorboards=[ - tensorboard.Tensorboard(), - tensorboard.Tensorboard(), - tensorboard.Tensorboard(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardsResponse( - tensorboards=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardsResponse( - tensorboards=[ - tensorboard.Tensorboard(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardsResponse( - tensorboards=[ - tensorboard.Tensorboard(), - tensorboard.Tensorboard(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_tensorboards(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, tensorboard.Tensorboard) - for i in results) - -def test_list_tensorboards_pages(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call 
within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboards), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - tensorboard_service.ListTensorboardsResponse( - tensorboards=[ - tensorboard.Tensorboard(), - tensorboard.Tensorboard(), - tensorboard.Tensorboard(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardsResponse( - tensorboards=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardsResponse( - tensorboards=[ - tensorboard.Tensorboard(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardsResponse( - tensorboards=[ - tensorboard.Tensorboard(), - tensorboard.Tensorboard(), - ], - ), - RuntimeError, - ) - pages = list(client.list_tensorboards(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_tensorboards_async_pager(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboards), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - tensorboard_service.ListTensorboardsResponse( - tensorboards=[ - tensorboard.Tensorboard(), - tensorboard.Tensorboard(), - tensorboard.Tensorboard(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardsResponse( - tensorboards=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardsResponse( - tensorboards=[ - tensorboard.Tensorboard(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardsResponse( - tensorboards=[ - tensorboard.Tensorboard(), - tensorboard.Tensorboard(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_tensorboards(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, tensorboard.Tensorboard) - for i in responses) - -@pytest.mark.asyncio -async def test_list_tensorboards_async_pages(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboards), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - tensorboard_service.ListTensorboardsResponse( - tensorboards=[ - tensorboard.Tensorboard(), - tensorboard.Tensorboard(), - tensorboard.Tensorboard(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardsResponse( - tensorboards=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardsResponse( - tensorboards=[ - tensorboard.Tensorboard(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardsResponse( - tensorboards=[ - tensorboard.Tensorboard(), - tensorboard.Tensorboard(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_tensorboards(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_tensorboard(transport: str = 'grpc', request_type=tensorboard_service.DeleteTensorboardRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_delete_tensorboard_from_dict(): - test_delete_tensorboard(request_type=dict) - - -def test_delete_tensorboard_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard), - '__call__') as call: - client.delete_tensorboard() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardRequest() - - -@pytest.mark.asyncio -async def test_delete_tensorboard_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.DeleteTensorboardRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_tensorboard_async_from_dict(): - await test_delete_tensorboard_async(request_type=dict) - - -def test_delete_tensorboard_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.DeleteTensorboardRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_tensorboard_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.DeleteTensorboardRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_tensorboard_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_tensorboard( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_tensorboard_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_tensorboard( - tensorboard_service.DeleteTensorboardRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_tensorboard_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_tensorboard( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_tensorboard_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_tensorboard( - tensorboard_service.DeleteTensorboardRequest(), - name='name_value', - ) - - -def test_create_tensorboard_experiment(transport: str = 'grpc', request_type=tensorboard_service.CreateTensorboardExperimentRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = gca_tensorboard_experiment.TensorboardExperiment( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - source='source_value', - ) - response = client.create_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardExperimentRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.source == 'source_value' - - -def test_create_tensorboard_experiment_from_dict(): - test_create_tensorboard_experiment(request_type=dict) - - -def test_create_tensorboard_experiment_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_tensorboard_experiment), - '__call__') as call: - client.create_tensorboard_experiment() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardExperimentRequest() - - -@pytest.mark.asyncio -async def test_create_tensorboard_experiment_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.CreateTensorboardExperimentRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - source='source_value', - )) - response = await client.create_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardExperimentRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.source == 'source_value' - - -@pytest.mark.asyncio -async def test_create_tensorboard_experiment_async_from_dict(): - await test_create_tensorboard_experiment_async(request_type=dict) - - -def test_create_tensorboard_experiment_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.CreateTensorboardExperimentRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_experiment), - '__call__') as call: - call.return_value = gca_tensorboard_experiment.TensorboardExperiment() - client.create_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_tensorboard_experiment_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.CreateTensorboardExperimentRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_tensorboard_experiment), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment()) - await client.create_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_tensorboard_experiment_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_experiment.TensorboardExperiment() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_tensorboard_experiment( - parent='parent_value', - tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), - tensorboard_experiment_id='tensorboard_experiment_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].tensorboard_experiment - mock_val = gca_tensorboard_experiment.TensorboardExperiment(name='name_value') - assert arg == mock_val - arg = args[0].tensorboard_experiment_id - mock_val = 'tensorboard_experiment_id_value' - assert arg == mock_val - - -def test_create_tensorboard_experiment_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_tensorboard_experiment( - tensorboard_service.CreateTensorboardExperimentRequest(), - parent='parent_value', - tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), - tensorboard_experiment_id='tensorboard_experiment_id_value', - ) - - -@pytest.mark.asyncio -async def test_create_tensorboard_experiment_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_experiment.TensorboardExperiment() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.create_tensorboard_experiment( - parent='parent_value', - tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), - tensorboard_experiment_id='tensorboard_experiment_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].tensorboard_experiment - mock_val = gca_tensorboard_experiment.TensorboardExperiment(name='name_value') - assert arg == mock_val - arg = args[0].tensorboard_experiment_id - mock_val = 'tensorboard_experiment_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_tensorboard_experiment_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_tensorboard_experiment( - tensorboard_service.CreateTensorboardExperimentRequest(), - parent='parent_value', - tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), - tensorboard_experiment_id='tensorboard_experiment_id_value', - ) - - -def test_get_tensorboard_experiment(transport: str = 'grpc', request_type=tensorboard_service.GetTensorboardExperimentRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_experiment.TensorboardExperiment( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - source='source_value', - ) - response = client.get_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardExperimentRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_experiment.TensorboardExperiment) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.source == 'source_value' - - -def test_get_tensorboard_experiment_from_dict(): - test_get_tensorboard_experiment(request_type=dict) - - -def test_get_tensorboard_experiment_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_tensorboard_experiment), - '__call__') as call: - client.get_tensorboard_experiment() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardExperimentRequest() - - -@pytest.mark.asyncio -async def test_get_tensorboard_experiment_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.GetTensorboardExperimentRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_experiment.TensorboardExperiment( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - source='source_value', - )) - response = await client.get_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardExperimentRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, tensorboard_experiment.TensorboardExperiment) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.source == 'source_value' - - -@pytest.mark.asyncio -async def test_get_tensorboard_experiment_async_from_dict(): - await test_get_tensorboard_experiment_async(request_type=dict) - - -def test_get_tensorboard_experiment_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.GetTensorboardExperimentRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_experiment), - '__call__') as call: - call.return_value = tensorboard_experiment.TensorboardExperiment() - client.get_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_tensorboard_experiment_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.GetTensorboardExperimentRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_tensorboard_experiment), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_experiment.TensorboardExperiment()) - await client.get_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_tensorboard_experiment_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_experiment.TensorboardExperiment() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_tensorboard_experiment( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_tensorboard_experiment_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_tensorboard_experiment( - tensorboard_service.GetTensorboardExperimentRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_tensorboard_experiment_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_experiment.TensorboardExperiment() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_experiment.TensorboardExperiment()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_tensorboard_experiment( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_tensorboard_experiment_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_tensorboard_experiment( - tensorboard_service.GetTensorboardExperimentRequest(), - name='name_value', - ) - - -def test_update_tensorboard_experiment(transport: str = 'grpc', request_type=tensorboard_service.UpdateTensorboardExperimentRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_experiment.TensorboardExperiment( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - source='source_value', - ) - response = client.update_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardExperimentRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.source == 'source_value' - - -def test_update_tensorboard_experiment_from_dict(): - test_update_tensorboard_experiment(request_type=dict) - - -def test_update_tensorboard_experiment_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_experiment), - '__call__') as call: - client.update_tensorboard_experiment() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardExperimentRequest() - - -@pytest.mark.asyncio -async def test_update_tensorboard_experiment_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.UpdateTensorboardExperimentRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - source='source_value', - )) - response = await client.update_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardExperimentRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.source == 'source_value' - - -@pytest.mark.asyncio -async def test_update_tensorboard_experiment_async_from_dict(): - await test_update_tensorboard_experiment_async(request_type=dict) - - -def test_update_tensorboard_experiment_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.UpdateTensorboardExperimentRequest() - - request.tensorboard_experiment.name = 'tensorboard_experiment.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_experiment), - '__call__') as call: - call.return_value = gca_tensorboard_experiment.TensorboardExperiment() - client.update_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_experiment.name=tensorboard_experiment.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_tensorboard_experiment_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = tensorboard_service.UpdateTensorboardExperimentRequest() - - request.tensorboard_experiment.name = 'tensorboard_experiment.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_experiment), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment()) - await client.update_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_experiment.name=tensorboard_experiment.name/value', - ) in kw['metadata'] - - -def test_update_tensorboard_experiment_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_experiment.TensorboardExperiment() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_tensorboard_experiment( - tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_experiment - mock_val = gca_tensorboard_experiment.TensorboardExperiment(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_tensorboard_experiment_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_tensorboard_experiment( - tensorboard_service.UpdateTensorboardExperimentRequest(), - tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_tensorboard_experiment_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_experiment.TensorboardExperiment() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_tensorboard_experiment( - tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_experiment - mock_val = gca_tensorboard_experiment.TensorboardExperiment(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_tensorboard_experiment_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_tensorboard_experiment( - tensorboard_service.UpdateTensorboardExperimentRequest(), - tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_list_tensorboard_experiments(transport: str = 'grpc', request_type=tensorboard_service.ListTensorboardExperimentsRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_experiments), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ListTensorboardExperimentsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_tensorboard_experiments(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ListTensorboardExperimentsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTensorboardExperimentsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_tensorboard_experiments_from_dict(): - test_list_tensorboard_experiments(request_type=dict) - - -def test_list_tensorboard_experiments_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_experiments), - '__call__') as call: - client.list_tensorboard_experiments() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ListTensorboardExperimentsRequest() - - -@pytest.mark.asyncio -async def test_list_tensorboard_experiments_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ListTensorboardExperimentsRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_experiments), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardExperimentsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_tensorboard_experiments(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ListTensorboardExperimentsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTensorboardExperimentsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_tensorboard_experiments_async_from_dict(): - await test_list_tensorboard_experiments_async(request_type=dict) - - -def test_list_tensorboard_experiments_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ListTensorboardExperimentsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_experiments), - '__call__') as call: - call.return_value = tensorboard_service.ListTensorboardExperimentsResponse() - client.list_tensorboard_experiments(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_tensorboard_experiments_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ListTensorboardExperimentsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_experiments), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardExperimentsResponse()) - await client.list_tensorboard_experiments(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_tensorboard_experiments_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_experiments), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ListTensorboardExperimentsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_tensorboard_experiments( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_tensorboard_experiments_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_tensorboard_experiments( - tensorboard_service.ListTensorboardExperimentsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_tensorboard_experiments_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_experiments), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ListTensorboardExperimentsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardExperimentsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_tensorboard_experiments( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_tensorboard_experiments_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.list_tensorboard_experiments( - tensorboard_service.ListTensorboardExperimentsRequest(), - parent='parent_value', - ) - - -def test_list_tensorboard_experiments_pager(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_experiments), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[ - tensorboard_experiment.TensorboardExperiment(), - tensorboard_experiment.TensorboardExperiment(), - tensorboard_experiment.TensorboardExperiment(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[ - tensorboard_experiment.TensorboardExperiment(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[ - tensorboard_experiment.TensorboardExperiment(), - tensorboard_experiment.TensorboardExperiment(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_tensorboard_experiments(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, tensorboard_experiment.TensorboardExperiment) - for i in results) - -def test_list_tensorboard_experiments_pages(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_tensorboard_experiments), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[ - tensorboard_experiment.TensorboardExperiment(), - tensorboard_experiment.TensorboardExperiment(), - tensorboard_experiment.TensorboardExperiment(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[ - tensorboard_experiment.TensorboardExperiment(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[ - tensorboard_experiment.TensorboardExperiment(), - tensorboard_experiment.TensorboardExperiment(), - ], - ), - RuntimeError, - ) - pages = list(client.list_tensorboard_experiments(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_tensorboard_experiments_async_pager(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_experiments), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[ - tensorboard_experiment.TensorboardExperiment(), - tensorboard_experiment.TensorboardExperiment(), - tensorboard_experiment.TensorboardExperiment(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[ - tensorboard_experiment.TensorboardExperiment(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[ - tensorboard_experiment.TensorboardExperiment(), - tensorboard_experiment.TensorboardExperiment(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_tensorboard_experiments(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, tensorboard_experiment.TensorboardExperiment) - for i in responses) - -@pytest.mark.asyncio -async def test_list_tensorboard_experiments_async_pages(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_experiments), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[ - tensorboard_experiment.TensorboardExperiment(), - tensorboard_experiment.TensorboardExperiment(), - tensorboard_experiment.TensorboardExperiment(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[ - tensorboard_experiment.TensorboardExperiment(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[ - tensorboard_experiment.TensorboardExperiment(), - tensorboard_experiment.TensorboardExperiment(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_tensorboard_experiments(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_tensorboard_experiment(transport: str = 'grpc', request_type=tensorboard_service.DeleteTensorboardExperimentRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardExperimentRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_tensorboard_experiment_from_dict(): - test_delete_tensorboard_experiment(request_type=dict) - - -def test_delete_tensorboard_experiment_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_experiment), - '__call__') as call: - client.delete_tensorboard_experiment() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardExperimentRequest() - - -@pytest.mark.asyncio -async def test_delete_tensorboard_experiment_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.DeleteTensorboardExperimentRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardExperimentRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_tensorboard_experiment_async_from_dict(): - await test_delete_tensorboard_experiment_async(request_type=dict) - - -def test_delete_tensorboard_experiment_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.DeleteTensorboardExperimentRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_experiment), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_tensorboard_experiment_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.DeleteTensorboardExperimentRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_tensorboard_experiment), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_tensorboard_experiment_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_tensorboard_experiment( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_tensorboard_experiment_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.delete_tensorboard_experiment( - tensorboard_service.DeleteTensorboardExperimentRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_tensorboard_experiment_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_tensorboard_experiment( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_tensorboard_experiment_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.delete_tensorboard_experiment( - tensorboard_service.DeleteTensorboardExperimentRequest(), - name='name_value', - ) - - -def test_create_tensorboard_run(transport: str = 'grpc', request_type=tensorboard_service.CreateTensorboardRunRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_run.TensorboardRun( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - ) - response = client.create_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardRunRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_tensorboard_run.TensorboardRun) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - - -def test_create_tensorboard_run_from_dict(): - test_create_tensorboard_run(request_type=dict) - - -def test_create_tensorboard_run_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_run), - '__call__') as call: - client.create_tensorboard_run() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardRunRequest() - - -@pytest.mark.asyncio -async def test_create_tensorboard_run_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.CreateTensorboardRunRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - )) - response = await client.create_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardRunRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_tensorboard_run.TensorboardRun) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_create_tensorboard_run_async_from_dict(): - await test_create_tensorboard_run_async(request_type=dict) - - -def test_create_tensorboard_run_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.CreateTensorboardRunRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_run), - '__call__') as call: - call.return_value = gca_tensorboard_run.TensorboardRun() - client.create_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_tensorboard_run_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.CreateTensorboardRunRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_tensorboard_run), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun()) - await client.create_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_tensorboard_run_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_run.TensorboardRun() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_tensorboard_run( - parent='parent_value', - tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), - tensorboard_run_id='tensorboard_run_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].tensorboard_run - mock_val = gca_tensorboard_run.TensorboardRun(name='name_value') - assert arg == mock_val - arg = args[0].tensorboard_run_id - mock_val = 'tensorboard_run_id_value' - assert arg == mock_val - - -def test_create_tensorboard_run_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_tensorboard_run( - tensorboard_service.CreateTensorboardRunRequest(), - parent='parent_value', - tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), - tensorboard_run_id='tensorboard_run_id_value', - ) - - -@pytest.mark.asyncio -async def test_create_tensorboard_run_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_run.TensorboardRun() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_tensorboard_run( - parent='parent_value', - tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), - tensorboard_run_id='tensorboard_run_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].tensorboard_run - mock_val = gca_tensorboard_run.TensorboardRun(name='name_value') - assert arg == mock_val - arg = args[0].tensorboard_run_id - mock_val = 'tensorboard_run_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_tensorboard_run_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_tensorboard_run( - tensorboard_service.CreateTensorboardRunRequest(), - parent='parent_value', - tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), - tensorboard_run_id='tensorboard_run_id_value', - ) - - -def test_batch_create_tensorboard_runs(transport: str = 'grpc', request_type=tensorboard_service.BatchCreateTensorboardRunsRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_runs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.BatchCreateTensorboardRunsResponse( - ) - response = client.batch_create_tensorboard_runs(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.BatchCreateTensorboardRunsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_service.BatchCreateTensorboardRunsResponse) - - -def test_batch_create_tensorboard_runs_from_dict(): - test_batch_create_tensorboard_runs(request_type=dict) - - -def test_batch_create_tensorboard_runs_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_runs), - '__call__') as call: - client.batch_create_tensorboard_runs() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.BatchCreateTensorboardRunsRequest() - - -@pytest.mark.asyncio -async def test_batch_create_tensorboard_runs_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.BatchCreateTensorboardRunsRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_runs), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchCreateTensorboardRunsResponse( - )) - response = await client.batch_create_tensorboard_runs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.BatchCreateTensorboardRunsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_service.BatchCreateTensorboardRunsResponse) - - -@pytest.mark.asyncio -async def test_batch_create_tensorboard_runs_async_from_dict(): - await test_batch_create_tensorboard_runs_async(request_type=dict) - - -def test_batch_create_tensorboard_runs_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.BatchCreateTensorboardRunsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_runs), - '__call__') as call: - call.return_value = tensorboard_service.BatchCreateTensorboardRunsResponse() - client.batch_create_tensorboard_runs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_batch_create_tensorboard_runs_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. 
Set these to a non-empty value. - request = tensorboard_service.BatchCreateTensorboardRunsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_runs), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchCreateTensorboardRunsResponse()) - await client.batch_create_tensorboard_runs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_batch_create_tensorboard_runs_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_runs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.BatchCreateTensorboardRunsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.batch_create_tensorboard_runs( - parent='parent_value', - requests=[tensorboard_service.CreateTensorboardRunRequest(parent='parent_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].requests - mock_val = [tensorboard_service.CreateTensorboardRunRequest(parent='parent_value')] - assert arg == mock_val - - -def test_batch_create_tensorboard_runs_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.batch_create_tensorboard_runs( - tensorboard_service.BatchCreateTensorboardRunsRequest(), - parent='parent_value', - requests=[tensorboard_service.CreateTensorboardRunRequest(parent='parent_value')], - ) - - -@pytest.mark.asyncio -async def test_batch_create_tensorboard_runs_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_runs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.BatchCreateTensorboardRunsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchCreateTensorboardRunsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.batch_create_tensorboard_runs( - parent='parent_value', - requests=[tensorboard_service.CreateTensorboardRunRequest(parent='parent_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].requests - mock_val = [tensorboard_service.CreateTensorboardRunRequest(parent='parent_value')] - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_batch_create_tensorboard_runs_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.batch_create_tensorboard_runs( - tensorboard_service.BatchCreateTensorboardRunsRequest(), - parent='parent_value', - requests=[tensorboard_service.CreateTensorboardRunRequest(parent='parent_value')], - ) - - -def test_get_tensorboard_run(transport: str = 'grpc', request_type=tensorboard_service.GetTensorboardRunRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_run.TensorboardRun( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - ) - response = client.get_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardRunRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, tensorboard_run.TensorboardRun) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - - -def test_get_tensorboard_run_from_dict(): - test_get_tensorboard_run(request_type=dict) - - -def test_get_tensorboard_run_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_run), - '__call__') as call: - client.get_tensorboard_run() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardRunRequest() - - -@pytest.mark.asyncio -async def test_get_tensorboard_run_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.GetTensorboardRunRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_run.TensorboardRun( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - )) - response = await client.get_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardRunRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_run.TensorboardRun) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_tensorboard_run_async_from_dict(): - await test_get_tensorboard_run_async(request_type=dict) - - -def test_get_tensorboard_run_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.GetTensorboardRunRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_run), - '__call__') as call: - call.return_value = tensorboard_run.TensorboardRun() - client.get_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_tensorboard_run_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.GetTensorboardRunRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_tensorboard_run), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_run.TensorboardRun()) - await client.get_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_tensorboard_run_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_run.TensorboardRun() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_tensorboard_run( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_tensorboard_run_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_tensorboard_run( - tensorboard_service.GetTensorboardRunRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_tensorboard_run_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_run.TensorboardRun() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_run.TensorboardRun()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_tensorboard_run( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_tensorboard_run_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_tensorboard_run( - tensorboard_service.GetTensorboardRunRequest(), - name='name_value', - ) - - -def test_update_tensorboard_run(transport: str = 'grpc', request_type=tensorboard_service.UpdateTensorboardRunRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_run.TensorboardRun( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - ) - response = client.update_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardRunRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_tensorboard_run.TensorboardRun) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - - -def test_update_tensorboard_run_from_dict(): - test_update_tensorboard_run(request_type=dict) - - -def test_update_tensorboard_run_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_tensorboard_run), - '__call__') as call: - client.update_tensorboard_run() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardRunRequest() - - -@pytest.mark.asyncio -async def test_update_tensorboard_run_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.UpdateTensorboardRunRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - )) - response = await client.update_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardRunRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_tensorboard_run.TensorboardRun) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_update_tensorboard_run_async_from_dict(): - await test_update_tensorboard_run_async(request_type=dict) - - -def test_update_tensorboard_run_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.UpdateTensorboardRunRequest() - - request.tensorboard_run.name = 'tensorboard_run.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_run), - '__call__') as call: - call.return_value = gca_tensorboard_run.TensorboardRun() - client.update_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_run.name=tensorboard_run.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_tensorboard_run_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.UpdateTensorboardRunRequest() - - request.tensorboard_run.name = 'tensorboard_run.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_tensorboard_run), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun()) - await client.update_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_run.name=tensorboard_run.name/value', - ) in kw['metadata'] - - -def test_update_tensorboard_run_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_run.TensorboardRun() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_tensorboard_run( - tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_run - mock_val = gca_tensorboard_run.TensorboardRun(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_tensorboard_run_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_tensorboard_run( - tensorboard_service.UpdateTensorboardRunRequest(), - tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_tensorboard_run_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_run.TensorboardRun() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_tensorboard_run( - tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_run - mock_val = gca_tensorboard_run.TensorboardRun(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_tensorboard_run_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.update_tensorboard_run( - tensorboard_service.UpdateTensorboardRunRequest(), - tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_list_tensorboard_runs(transport: str = 'grpc', request_type=tensorboard_service.ListTensorboardRunsRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_runs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ListTensorboardRunsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_tensorboard_runs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ListTensorboardRunsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTensorboardRunsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_tensorboard_runs_from_dict(): - test_list_tensorboard_runs(request_type=dict) - - -def test_list_tensorboard_runs_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_tensorboard_runs), - '__call__') as call: - client.list_tensorboard_runs() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ListTensorboardRunsRequest() - - -@pytest.mark.asyncio -async def test_list_tensorboard_runs_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ListTensorboardRunsRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_runs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardRunsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_tensorboard_runs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ListTensorboardRunsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTensorboardRunsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_tensorboard_runs_async_from_dict(): - await test_list_tensorboard_runs_async(request_type=dict) - - -def test_list_tensorboard_runs_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = tensorboard_service.ListTensorboardRunsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_runs), - '__call__') as call: - call.return_value = tensorboard_service.ListTensorboardRunsResponse() - client.list_tensorboard_runs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_tensorboard_runs_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ListTensorboardRunsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_runs), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardRunsResponse()) - await client.list_tensorboard_runs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_tensorboard_runs_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_tensorboard_runs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ListTensorboardRunsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_tensorboard_runs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_tensorboard_runs_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_tensorboard_runs( - tensorboard_service.ListTensorboardRunsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_tensorboard_runs_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_runs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ListTensorboardRunsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardRunsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_tensorboard_runs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_tensorboard_runs_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_tensorboard_runs( - tensorboard_service.ListTensorboardRunsRequest(), - parent='parent_value', - ) - - -def test_list_tensorboard_runs_pager(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_runs), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[ - tensorboard_run.TensorboardRun(), - tensorboard_run.TensorboardRun(), - tensorboard_run.TensorboardRun(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[ - tensorboard_run.TensorboardRun(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[ - tensorboard_run.TensorboardRun(), - tensorboard_run.TensorboardRun(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_tensorboard_runs(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, tensorboard_run.TensorboardRun) - for i in results) - -def test_list_tensorboard_runs_pages(): - client = 
TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_runs), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[ - tensorboard_run.TensorboardRun(), - tensorboard_run.TensorboardRun(), - tensorboard_run.TensorboardRun(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[ - tensorboard_run.TensorboardRun(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[ - tensorboard_run.TensorboardRun(), - tensorboard_run.TensorboardRun(), - ], - ), - RuntimeError, - ) - pages = list(client.list_tensorboard_runs(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_tensorboard_runs_async_pager(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_runs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[ - tensorboard_run.TensorboardRun(), - tensorboard_run.TensorboardRun(), - tensorboard_run.TensorboardRun(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[ - tensorboard_run.TensorboardRun(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[ - tensorboard_run.TensorboardRun(), - tensorboard_run.TensorboardRun(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_tensorboard_runs(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, tensorboard_run.TensorboardRun) - for i in responses) - -@pytest.mark.asyncio -async def test_list_tensorboard_runs_async_pages(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_runs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[ - tensorboard_run.TensorboardRun(), - tensorboard_run.TensorboardRun(), - tensorboard_run.TensorboardRun(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[ - tensorboard_run.TensorboardRun(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[ - tensorboard_run.TensorboardRun(), - tensorboard_run.TensorboardRun(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_tensorboard_runs(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_tensorboard_run(transport: str = 'grpc', request_type=tensorboard_service.DeleteTensorboardRunRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardRunRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_delete_tensorboard_run_from_dict(): - test_delete_tensorboard_run(request_type=dict) - - -def test_delete_tensorboard_run_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_run), - '__call__') as call: - client.delete_tensorboard_run() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardRunRequest() - - -@pytest.mark.asyncio -async def test_delete_tensorboard_run_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.DeleteTensorboardRunRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardRunRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_tensorboard_run_async_from_dict(): - await test_delete_tensorboard_run_async(request_type=dict) - - -def test_delete_tensorboard_run_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.DeleteTensorboardRunRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_run), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_tensorboard_run_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.DeleteTensorboardRunRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_run), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_tensorboard_run_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_tensorboard_run( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_tensorboard_run_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_tensorboard_run( - tensorboard_service.DeleteTensorboardRunRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_tensorboard_run_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_tensorboard_run( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_tensorboard_run_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_tensorboard_run( - tensorboard_service.DeleteTensorboardRunRequest(), - name='name_value', - ) - - -def test_batch_create_tensorboard_time_series(transport: str = 'grpc', request_type=tensorboard_service.BatchCreateTensorboardTimeSeriesRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.BatchCreateTensorboardTimeSeriesResponse( - ) - response = client.batch_create_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.BatchCreateTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_service.BatchCreateTensorboardTimeSeriesResponse) - - -def test_batch_create_tensorboard_time_series_from_dict(): - test_batch_create_tensorboard_time_series(request_type=dict) - - -def test_batch_create_tensorboard_time_series_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_time_series), - '__call__') as call: - client.batch_create_tensorboard_time_series() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.BatchCreateTensorboardTimeSeriesRequest() - - -@pytest.mark.asyncio -async def test_batch_create_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.BatchCreateTensorboardTimeSeriesRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchCreateTensorboardTimeSeriesResponse( - )) - response = await client.batch_create_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.BatchCreateTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_service.BatchCreateTensorboardTimeSeriesResponse) - - -@pytest.mark.asyncio -async def test_batch_create_tensorboard_time_series_async_from_dict(): - await test_batch_create_tensorboard_time_series_async(request_type=dict) - - -def test_batch_create_tensorboard_time_series_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.BatchCreateTensorboardTimeSeriesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_time_series), - '__call__') as call: - call.return_value = tensorboard_service.BatchCreateTensorboardTimeSeriesResponse() - client.batch_create_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_batch_create_tensorboard_time_series_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.BatchCreateTensorboardTimeSeriesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_time_series), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchCreateTensorboardTimeSeriesResponse()) - await client.batch_create_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_batch_create_tensorboard_time_series_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.BatchCreateTensorboardTimeSeriesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.batch_create_tensorboard_time_series( - parent='parent_value', - requests=[tensorboard_service.CreateTensorboardTimeSeriesRequest(parent='parent_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].requests - mock_val = [tensorboard_service.CreateTensorboardTimeSeriesRequest(parent='parent_value')] - assert arg == mock_val - - -def test_batch_create_tensorboard_time_series_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.batch_create_tensorboard_time_series( - tensorboard_service.BatchCreateTensorboardTimeSeriesRequest(), - parent='parent_value', - requests=[tensorboard_service.CreateTensorboardTimeSeriesRequest(parent='parent_value')], - ) - - -@pytest.mark.asyncio -async def test_batch_create_tensorboard_time_series_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.BatchCreateTensorboardTimeSeriesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchCreateTensorboardTimeSeriesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.batch_create_tensorboard_time_series( - parent='parent_value', - requests=[tensorboard_service.CreateTensorboardTimeSeriesRequest(parent='parent_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].requests - mock_val = [tensorboard_service.CreateTensorboardTimeSeriesRequest(parent='parent_value')] - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_batch_create_tensorboard_time_series_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.batch_create_tensorboard_time_series( - tensorboard_service.BatchCreateTensorboardTimeSeriesRequest(), - parent='parent_value', - requests=[tensorboard_service.CreateTensorboardTimeSeriesRequest(parent='parent_value')], - ) - - -def test_create_tensorboard_time_series(transport: str = 'grpc', request_type=tensorboard_service.CreateTensorboardTimeSeriesRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries( - name='name_value', - display_name='display_name_value', - description='description_value', - value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, - etag='etag_value', - plugin_name='plugin_name_value', - plugin_data=b'plugin_data_blob', - ) - response = client.create_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.value_type == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR - assert response.etag == 'etag_value' - assert response.plugin_name == 'plugin_name_value' - assert response.plugin_data == b'plugin_data_blob' - - -def test_create_tensorboard_time_series_from_dict(): - test_create_tensorboard_time_series(request_type=dict) - - -def test_create_tensorboard_time_series_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_tensorboard_time_series), - '__call__') as call: - client.create_tensorboard_time_series() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardTimeSeriesRequest() - - -@pytest.mark.asyncio -async def test_create_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.CreateTensorboardTimeSeriesRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries( - name='name_value', - display_name='display_name_value', - description='description_value', - value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, - etag='etag_value', - plugin_name='plugin_name_value', - plugin_data=b'plugin_data_blob', - )) - response = await client.create_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.value_type == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR - assert response.etag == 'etag_value' - assert response.plugin_name == 'plugin_name_value' - assert response.plugin_data == b'plugin_data_blob' - - -@pytest.mark.asyncio -async def test_create_tensorboard_time_series_async_from_dict(): - await test_create_tensorboard_time_series_async(request_type=dict) - - -def test_create_tensorboard_time_series_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.CreateTensorboardTimeSeriesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_time_series), - '__call__') as call: - call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() - client.create_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_tensorboard_time_series_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = tensorboard_service.CreateTensorboardTimeSeriesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_time_series), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries()) - await client.create_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_tensorboard_time_series_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_tensorboard_time_series( - parent='parent_value', - tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].tensorboard_time_series - mock_val = gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value') - assert arg == mock_val - - -def test_create_tensorboard_time_series_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_tensorboard_time_series( - tensorboard_service.CreateTensorboardTimeSeriesRequest(), - parent='parent_value', - tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_tensorboard_time_series_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_tensorboard_time_series( - parent='parent_value', - tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].tensorboard_time_series - mock_val = gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_tensorboard_time_series_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_tensorboard_time_series( - tensorboard_service.CreateTensorboardTimeSeriesRequest(), - parent='parent_value', - tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), - ) - - -def test_get_tensorboard_time_series(transport: str = 'grpc', request_type=tensorboard_service.GetTensorboardTimeSeriesRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_time_series.TensorboardTimeSeries( - name='name_value', - display_name='display_name_value', - description='description_value', - value_type=tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, - etag='etag_value', - plugin_name='plugin_name_value', - plugin_data=b'plugin_data_blob', - ) - response = client.get_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_time_series.TensorboardTimeSeries) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.value_type == tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR - assert response.etag == 'etag_value' - assert response.plugin_name == 'plugin_name_value' - assert response.plugin_data == b'plugin_data_blob' - - -def test_get_tensorboard_time_series_from_dict(): - test_get_tensorboard_time_series(request_type=dict) - - -def test_get_tensorboard_time_series_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_time_series), - '__call__') as call: - client.get_tensorboard_time_series() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardTimeSeriesRequest() - - -@pytest.mark.asyncio -async def test_get_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.GetTensorboardTimeSeriesRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_time_series.TensorboardTimeSeries( - name='name_value', - display_name='display_name_value', - description='description_value', - value_type=tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, - etag='etag_value', - plugin_name='plugin_name_value', - plugin_data=b'plugin_data_blob', - )) - response = await client.get_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_time_series.TensorboardTimeSeries) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.value_type == tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR - assert response.etag == 'etag_value' - assert response.plugin_name == 'plugin_name_value' - assert response.plugin_data == b'plugin_data_blob' - - -@pytest.mark.asyncio -async def test_get_tensorboard_time_series_async_from_dict(): - await test_get_tensorboard_time_series_async(request_type=dict) - - -def test_get_tensorboard_time_series_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.GetTensorboardTimeSeriesRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_tensorboard_time_series), - '__call__') as call: - call.return_value = tensorboard_time_series.TensorboardTimeSeries() - client.get_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_tensorboard_time_series_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.GetTensorboardTimeSeriesRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_time_series), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_time_series.TensorboardTimeSeries()) - await client.get_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_tensorboard_time_series_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = tensorboard_time_series.TensorboardTimeSeries() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_tensorboard_time_series( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_tensorboard_time_series_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_tensorboard_time_series( - tensorboard_service.GetTensorboardTimeSeriesRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_tensorboard_time_series_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_time_series.TensorboardTimeSeries() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_time_series.TensorboardTimeSeries()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_tensorboard_time_series( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_tensorboard_time_series_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_tensorboard_time_series( - tensorboard_service.GetTensorboardTimeSeriesRequest(), - name='name_value', - ) - - -def test_update_tensorboard_time_series(transport: str = 'grpc', request_type=tensorboard_service.UpdateTensorboardTimeSeriesRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries( - name='name_value', - display_name='display_name_value', - description='description_value', - value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, - etag='etag_value', - plugin_name='plugin_name_value', - plugin_data=b'plugin_data_blob', - ) - response = client.update_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.value_type == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR - assert response.etag == 'etag_value' - assert response.plugin_name == 'plugin_name_value' - assert response.plugin_data == b'plugin_data_blob' - - -def test_update_tensorboard_time_series_from_dict(): - test_update_tensorboard_time_series(request_type=dict) - - -def test_update_tensorboard_time_series_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_time_series), - '__call__') as call: - client.update_tensorboard_time_series() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardTimeSeriesRequest() - - -@pytest.mark.asyncio -async def test_update_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.UpdateTensorboardTimeSeriesRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries( - name='name_value', - display_name='display_name_value', - description='description_value', - value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, - etag='etag_value', - plugin_name='plugin_name_value', - plugin_data=b'plugin_data_blob', - )) - response = await client.update_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.value_type == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR - assert response.etag == 'etag_value' - assert response.plugin_name == 'plugin_name_value' - assert response.plugin_data == b'plugin_data_blob' - - -@pytest.mark.asyncio -async def test_update_tensorboard_time_series_async_from_dict(): - await test_update_tensorboard_time_series_async(request_type=dict) - - -def test_update_tensorboard_time_series_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.UpdateTensorboardTimeSeriesRequest() - - request.tensorboard_time_series.name = 'tensorboard_time_series.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_tensorboard_time_series), - '__call__') as call: - call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() - client.update_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_time_series.name=tensorboard_time_series.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_tensorboard_time_series_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.UpdateTensorboardTimeSeriesRequest() - - request.tensorboard_time_series.name = 'tensorboard_time_series.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_time_series), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries()) - await client.update_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_time_series.name=tensorboard_time_series.name/value', - ) in kw['metadata'] - - -def test_update_tensorboard_time_series_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_tensorboard_time_series( - tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_time_series - mock_val = gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_tensorboard_time_series_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_tensorboard_time_series( - tensorboard_service.UpdateTensorboardTimeSeriesRequest(), - tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_tensorboard_time_series_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_tensorboard_time_series( - tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_time_series - mock_val = gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_tensorboard_time_series_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_tensorboard_time_series( - tensorboard_service.UpdateTensorboardTimeSeriesRequest(), - tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_list_tensorboard_time_series(transport: str = 'grpc', request_type=tensorboard_service.ListTensorboardTimeSeriesRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse( - next_page_token='next_page_token_value', - ) - response = client.list_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ListTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTensorboardTimeSeriesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_tensorboard_time_series_from_dict(): - test_list_tensorboard_time_series(request_type=dict) - - -def test_list_tensorboard_time_series_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_tensorboard_time_series), - '__call__') as call: - client.list_tensorboard_time_series() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ListTensorboardTimeSeriesRequest() - - -@pytest.mark.asyncio -async def test_list_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ListTensorboardTimeSeriesRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardTimeSeriesResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ListTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListTensorboardTimeSeriesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_tensorboard_time_series_async_from_dict(): - await test_list_tensorboard_time_series_async(request_type=dict) - - -def test_list_tensorboard_time_series_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ListTensorboardTimeSeriesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_time_series), - '__call__') as call: - call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse() - client.list_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_tensorboard_time_series_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ListTensorboardTimeSeriesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_tensorboard_time_series), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardTimeSeriesResponse()) - await client.list_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_tensorboard_time_series_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_tensorboard_time_series( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_tensorboard_time_series_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_tensorboard_time_series( - tensorboard_service.ListTensorboardTimeSeriesRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_tensorboard_time_series_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardTimeSeriesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_tensorboard_time_series( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_tensorboard_time_series_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_tensorboard_time_series( - tensorboard_service.ListTensorboardTimeSeriesRequest(), - parent='parent_value', - ) - - -def test_list_tensorboard_time_series_pager(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_tensorboard_time_series), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[ - tensorboard_time_series.TensorboardTimeSeries(), - tensorboard_time_series.TensorboardTimeSeries(), - tensorboard_time_series.TensorboardTimeSeries(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[ - tensorboard_time_series.TensorboardTimeSeries(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[ - tensorboard_time_series.TensorboardTimeSeries(), - tensorboard_time_series.TensorboardTimeSeries(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_tensorboard_time_series(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, tensorboard_time_series.TensorboardTimeSeries) - for i in results) - -def test_list_tensorboard_time_series_pages(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_time_series), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[ - tensorboard_time_series.TensorboardTimeSeries(), - tensorboard_time_series.TensorboardTimeSeries(), - tensorboard_time_series.TensorboardTimeSeries(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[ - tensorboard_time_series.TensorboardTimeSeries(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[ - tensorboard_time_series.TensorboardTimeSeries(), - tensorboard_time_series.TensorboardTimeSeries(), - ], - ), - RuntimeError, - ) - pages = list(client.list_tensorboard_time_series(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_tensorboard_time_series_async_pager(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_time_series), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[ - tensorboard_time_series.TensorboardTimeSeries(), - tensorboard_time_series.TensorboardTimeSeries(), - tensorboard_time_series.TensorboardTimeSeries(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[ - tensorboard_time_series.TensorboardTimeSeries(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[ - tensorboard_time_series.TensorboardTimeSeries(), - tensorboard_time_series.TensorboardTimeSeries(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_tensorboard_time_series(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, tensorboard_time_series.TensorboardTimeSeries) - for i in responses) - -@pytest.mark.asyncio -async def test_list_tensorboard_time_series_async_pages(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_time_series), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[ - tensorboard_time_series.TensorboardTimeSeries(), - tensorboard_time_series.TensorboardTimeSeries(), - tensorboard_time_series.TensorboardTimeSeries(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[ - tensorboard_time_series.TensorboardTimeSeries(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[ - tensorboard_time_series.TensorboardTimeSeries(), - tensorboard_time_series.TensorboardTimeSeries(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_tensorboard_time_series(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_tensorboard_time_series(transport: str = 'grpc', request_type=tensorboard_service.DeleteTensorboardTimeSeriesRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_tensorboard_time_series_from_dict(): - test_delete_tensorboard_time_series(request_type=dict) - - -def test_delete_tensorboard_time_series_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_time_series), - '__call__') as call: - client.delete_tensorboard_time_series() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardTimeSeriesRequest() - - -@pytest.mark.asyncio -async def test_delete_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.DeleteTensorboardTimeSeriesRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_tensorboard_time_series_async_from_dict(): - await test_delete_tensorboard_time_series_async(request_type=dict) - - -def test_delete_tensorboard_time_series_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.DeleteTensorboardTimeSeriesRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_time_series), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_tensorboard_time_series_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.DeleteTensorboardTimeSeriesRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_tensorboard_time_series), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_tensorboard_time_series_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_tensorboard_time_series( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_tensorboard_time_series_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.delete_tensorboard_time_series( - tensorboard_service.DeleteTensorboardTimeSeriesRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_tensorboard_time_series_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_tensorboard_time_series( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_tensorboard_time_series_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.delete_tensorboard_time_series( - tensorboard_service.DeleteTensorboardTimeSeriesRequest(), - name='name_value', - ) - - -def test_batch_read_tensorboard_time_series_data(transport: str = 'grpc', request_type=tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_tensorboard_time_series_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse( - ) - response = client.batch_read_tensorboard_time_series_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse) - - -def test_batch_read_tensorboard_time_series_data_from_dict(): - test_batch_read_tensorboard_time_series_data(request_type=dict) - - -def test_batch_read_tensorboard_time_series_data_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.batch_read_tensorboard_time_series_data), - '__call__') as call: - client.batch_read_tensorboard_time_series_data() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest() - - -@pytest.mark.asyncio -async def test_batch_read_tensorboard_time_series_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_tensorboard_time_series_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse( - )) - response = await client.batch_read_tensorboard_time_series_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse) - - -@pytest.mark.asyncio -async def test_batch_read_tensorboard_time_series_data_async_from_dict(): - await test_batch_read_tensorboard_time_series_data_async(request_type=dict) - - -def test_batch_read_tensorboard_time_series_data_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest() - - request.tensorboard = 'tensorboard/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_tensorboard_time_series_data), - '__call__') as call: - call.return_value = tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse() - client.batch_read_tensorboard_time_series_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard=tensorboard/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_batch_read_tensorboard_time_series_data_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest() - - request.tensorboard = 'tensorboard/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.batch_read_tensorboard_time_series_data), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse()) - await client.batch_read_tensorboard_time_series_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard=tensorboard/value', - ) in kw['metadata'] - - -def test_batch_read_tensorboard_time_series_data_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_tensorboard_time_series_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.batch_read_tensorboard_time_series_data( - tensorboard='tensorboard_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard - mock_val = 'tensorboard_value' - assert arg == mock_val - - -def test_batch_read_tensorboard_time_series_data_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.batch_read_tensorboard_time_series_data( - tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest(), - tensorboard='tensorboard_value', - ) - - -@pytest.mark.asyncio -async def test_batch_read_tensorboard_time_series_data_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_tensorboard_time_series_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.batch_read_tensorboard_time_series_data( - tensorboard='tensorboard_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard - mock_val = 'tensorboard_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_batch_read_tensorboard_time_series_data_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.batch_read_tensorboard_time_series_data( - tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest(), - tensorboard='tensorboard_value', - ) - - -def test_read_tensorboard_time_series_data(transport: str = 'grpc', request_type=tensorboard_service.ReadTensorboardTimeSeriesDataRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_time_series_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse( - ) - response = client.read_tensorboard_time_series_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ReadTensorboardTimeSeriesDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_service.ReadTensorboardTimeSeriesDataResponse) - - -def test_read_tensorboard_time_series_data_from_dict(): - test_read_tensorboard_time_series_data(request_type=dict) - - -def test_read_tensorboard_time_series_data_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.read_tensorboard_time_series_data), - '__call__') as call: - client.read_tensorboard_time_series_data() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ReadTensorboardTimeSeriesDataRequest() - - -@pytest.mark.asyncio -async def test_read_tensorboard_time_series_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ReadTensorboardTimeSeriesDataRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_time_series_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ReadTensorboardTimeSeriesDataResponse( - )) - response = await client.read_tensorboard_time_series_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ReadTensorboardTimeSeriesDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_service.ReadTensorboardTimeSeriesDataResponse) - - -@pytest.mark.asyncio -async def test_read_tensorboard_time_series_data_async_from_dict(): - await test_read_tensorboard_time_series_data_async(request_type=dict) - - -def test_read_tensorboard_time_series_data_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. 
Set these to a non-empty value. - request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest() - - request.tensorboard_time_series = 'tensorboard_time_series/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_time_series_data), - '__call__') as call: - call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse() - client.read_tensorboard_time_series_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_time_series=tensorboard_time_series/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_read_tensorboard_time_series_data_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest() - - request.tensorboard_time_series = 'tensorboard_time_series/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_time_series_data), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ReadTensorboardTimeSeriesDataResponse()) - await client.read_tensorboard_time_series_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_time_series=tensorboard_time_series/value', - ) in kw['metadata'] - - -def test_read_tensorboard_time_series_data_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_time_series_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.read_tensorboard_time_series_data( - tensorboard_time_series='tensorboard_time_series_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_time_series - mock_val = 'tensorboard_time_series_value' - assert arg == mock_val - - -def test_read_tensorboard_time_series_data_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.read_tensorboard_time_series_data( - tensorboard_service.ReadTensorboardTimeSeriesDataRequest(), - tensorboard_time_series='tensorboard_time_series_value', - ) - - -@pytest.mark.asyncio -async def test_read_tensorboard_time_series_data_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.read_tensorboard_time_series_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ReadTensorboardTimeSeriesDataResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.read_tensorboard_time_series_data( - tensorboard_time_series='tensorboard_time_series_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_time_series - mock_val = 'tensorboard_time_series_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_read_tensorboard_time_series_data_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.read_tensorboard_time_series_data( - tensorboard_service.ReadTensorboardTimeSeriesDataRequest(), - tensorboard_time_series='tensorboard_time_series_value', - ) - - -def test_read_tensorboard_blob_data(transport: str = 'grpc', request_type=tensorboard_service.ReadTensorboardBlobDataRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.read_tensorboard_blob_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([tensorboard_service.ReadTensorboardBlobDataResponse()]) - response = client.read_tensorboard_blob_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ReadTensorboardBlobDataRequest() - - # Establish that the response is the type that we expect. - for message in response: - assert isinstance(message, tensorboard_service.ReadTensorboardBlobDataResponse) - - -def test_read_tensorboard_blob_data_from_dict(): - test_read_tensorboard_blob_data(request_type=dict) - - -def test_read_tensorboard_blob_data_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_blob_data), - '__call__') as call: - client.read_tensorboard_blob_data() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ReadTensorboardBlobDataRequest() - - -@pytest.mark.asyncio -async def test_read_tensorboard_blob_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ReadTensorboardBlobDataRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.read_tensorboard_blob_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock(side_effect=[tensorboard_service.ReadTensorboardBlobDataResponse()]) - response = await client.read_tensorboard_blob_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ReadTensorboardBlobDataRequest() - - # Establish that the response is the type that we expect. - message = await response.read() - assert isinstance(message, tensorboard_service.ReadTensorboardBlobDataResponse) - - -@pytest.mark.asyncio -async def test_read_tensorboard_blob_data_async_from_dict(): - await test_read_tensorboard_blob_data_async(request_type=dict) - - -def test_read_tensorboard_blob_data_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ReadTensorboardBlobDataRequest() - - request.time_series = 'time_series/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_blob_data), - '__call__') as call: - call.return_value = iter([tensorboard_service.ReadTensorboardBlobDataResponse()]) - client.read_tensorboard_blob_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'time_series=time_series/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_read_tensorboard_blob_data_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ReadTensorboardBlobDataRequest() - - request.time_series = 'time_series/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_blob_data), - '__call__') as call: - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock(side_effect=[tensorboard_service.ReadTensorboardBlobDataResponse()]) - await client.read_tensorboard_blob_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'time_series=time_series/value', - ) in kw['metadata'] - - -def test_read_tensorboard_blob_data_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_blob_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([tensorboard_service.ReadTensorboardBlobDataResponse()]) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.read_tensorboard_blob_data( - time_series='time_series_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].time_series - mock_val = 'time_series_value' - assert arg == mock_val - - -def test_read_tensorboard_blob_data_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.read_tensorboard_blob_data( - tensorboard_service.ReadTensorboardBlobDataRequest(), - time_series='time_series_value', - ) - - -@pytest.mark.asyncio -async def test_read_tensorboard_blob_data_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_blob_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([tensorboard_service.ReadTensorboardBlobDataResponse()]) - - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.read_tensorboard_blob_data( - time_series='time_series_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].time_series - mock_val = 'time_series_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_read_tensorboard_blob_data_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.read_tensorboard_blob_data( - tensorboard_service.ReadTensorboardBlobDataRequest(), - time_series='time_series_value', - ) - - -def test_write_tensorboard_experiment_data(transport: str = 'grpc', request_type=tensorboard_service.WriteTensorboardExperimentDataRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_experiment_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.WriteTensorboardExperimentDataResponse( - ) - response = client.write_tensorboard_experiment_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.WriteTensorboardExperimentDataRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, tensorboard_service.WriteTensorboardExperimentDataResponse) - - -def test_write_tensorboard_experiment_data_from_dict(): - test_write_tensorboard_experiment_data(request_type=dict) - - -def test_write_tensorboard_experiment_data_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_experiment_data), - '__call__') as call: - client.write_tensorboard_experiment_data() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.WriteTensorboardExperimentDataRequest() - - -@pytest.mark.asyncio -async def test_write_tensorboard_experiment_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.WriteTensorboardExperimentDataRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_experiment_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardExperimentDataResponse( - )) - response = await client.write_tensorboard_experiment_data(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.WriteTensorboardExperimentDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_service.WriteTensorboardExperimentDataResponse) - - -@pytest.mark.asyncio -async def test_write_tensorboard_experiment_data_async_from_dict(): - await test_write_tensorboard_experiment_data_async(request_type=dict) - - -def test_write_tensorboard_experiment_data_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.WriteTensorboardExperimentDataRequest() - - request.tensorboard_experiment = 'tensorboard_experiment/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_experiment_data), - '__call__') as call: - call.return_value = tensorboard_service.WriteTensorboardExperimentDataResponse() - client.write_tensorboard_experiment_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_experiment=tensorboard_experiment/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_write_tensorboard_experiment_data_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = tensorboard_service.WriteTensorboardExperimentDataRequest() - - request.tensorboard_experiment = 'tensorboard_experiment/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_experiment_data), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardExperimentDataResponse()) - await client.write_tensorboard_experiment_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_experiment=tensorboard_experiment/value', - ) in kw['metadata'] - - -def test_write_tensorboard_experiment_data_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_experiment_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.WriteTensorboardExperimentDataResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.write_tensorboard_experiment_data( - tensorboard_experiment='tensorboard_experiment_value', - write_run_data_requests=[tensorboard_service.WriteTensorboardRunDataRequest(tensorboard_run='tensorboard_run_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_experiment - mock_val = 'tensorboard_experiment_value' - assert arg == mock_val - arg = args[0].write_run_data_requests - mock_val = [tensorboard_service.WriteTensorboardRunDataRequest(tensorboard_run='tensorboard_run_value')] - assert arg == mock_val - - -def test_write_tensorboard_experiment_data_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.write_tensorboard_experiment_data( - tensorboard_service.WriteTensorboardExperimentDataRequest(), - tensorboard_experiment='tensorboard_experiment_value', - write_run_data_requests=[tensorboard_service.WriteTensorboardRunDataRequest(tensorboard_run='tensorboard_run_value')], - ) - - -@pytest.mark.asyncio -async def test_write_tensorboard_experiment_data_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_experiment_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.WriteTensorboardExperimentDataResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardExperimentDataResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.write_tensorboard_experiment_data( - tensorboard_experiment='tensorboard_experiment_value', - write_run_data_requests=[tensorboard_service.WriteTensorboardRunDataRequest(tensorboard_run='tensorboard_run_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_experiment - mock_val = 'tensorboard_experiment_value' - assert arg == mock_val - arg = args[0].write_run_data_requests - mock_val = [tensorboard_service.WriteTensorboardRunDataRequest(tensorboard_run='tensorboard_run_value')] - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_write_tensorboard_experiment_data_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.write_tensorboard_experiment_data( - tensorboard_service.WriteTensorboardExperimentDataRequest(), - tensorboard_experiment='tensorboard_experiment_value', - write_run_data_requests=[tensorboard_service.WriteTensorboardRunDataRequest(tensorboard_run='tensorboard_run_value')], - ) - - -def test_write_tensorboard_run_data(transport: str = 'grpc', request_type=tensorboard_service.WriteTensorboardRunDataRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_run_data), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = tensorboard_service.WriteTensorboardRunDataResponse( - ) - response = client.write_tensorboard_run_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.WriteTensorboardRunDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_service.WriteTensorboardRunDataResponse) - - -def test_write_tensorboard_run_data_from_dict(): - test_write_tensorboard_run_data(request_type=dict) - - -def test_write_tensorboard_run_data_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_run_data), - '__call__') as call: - client.write_tensorboard_run_data() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.WriteTensorboardRunDataRequest() - - -@pytest.mark.asyncio -async def test_write_tensorboard_run_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.WriteTensorboardRunDataRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_run_data), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardRunDataResponse( - )) - response = await client.write_tensorboard_run_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.WriteTensorboardRunDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_service.WriteTensorboardRunDataResponse) - - -@pytest.mark.asyncio -async def test_write_tensorboard_run_data_async_from_dict(): - await test_write_tensorboard_run_data_async(request_type=dict) - - -def test_write_tensorboard_run_data_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.WriteTensorboardRunDataRequest() - - request.tensorboard_run = 'tensorboard_run/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_run_data), - '__call__') as call: - call.return_value = tensorboard_service.WriteTensorboardRunDataResponse() - client.write_tensorboard_run_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_run=tensorboard_run/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_write_tensorboard_run_data_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. 
Set these to a non-empty value. - request = tensorboard_service.WriteTensorboardRunDataRequest() - - request.tensorboard_run = 'tensorboard_run/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_run_data), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardRunDataResponse()) - await client.write_tensorboard_run_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_run=tensorboard_run/value', - ) in kw['metadata'] - - -def test_write_tensorboard_run_data_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_run_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.WriteTensorboardRunDataResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.write_tensorboard_run_data( - tensorboard_run='tensorboard_run_value', - time_series_data=[tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_run - mock_val = 'tensorboard_run_value' - assert arg == mock_val - arg = args[0].time_series_data - mock_val = [tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')] - assert arg == mock_val - - -def test_write_tensorboard_run_data_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.write_tensorboard_run_data( - tensorboard_service.WriteTensorboardRunDataRequest(), - tensorboard_run='tensorboard_run_value', - time_series_data=[tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')], - ) - - -@pytest.mark.asyncio -async def test_write_tensorboard_run_data_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_run_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.WriteTensorboardRunDataResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardRunDataResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.write_tensorboard_run_data( - tensorboard_run='tensorboard_run_value', - time_series_data=[tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_run - mock_val = 'tensorboard_run_value' - assert arg == mock_val - arg = args[0].time_series_data - mock_val = [tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')] - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_write_tensorboard_run_data_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.write_tensorboard_run_data( - tensorboard_service.WriteTensorboardRunDataRequest(), - tensorboard_run='tensorboard_run_value', - time_series_data=[tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')], - ) - - -def test_export_tensorboard_time_series_data(transport: str = 'grpc', request_type=tensorboard_service.ExportTensorboardTimeSeriesDataRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_tensorboard_time_series_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - next_page_token='next_page_token_value', - ) - response = client.export_tensorboard_time_series_data(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ExportTensorboardTimeSeriesDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ExportTensorboardTimeSeriesDataPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_export_tensorboard_time_series_data_from_dict(): - test_export_tensorboard_time_series_data(request_type=dict) - - -def test_export_tensorboard_time_series_data_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_tensorboard_time_series_data), - '__call__') as call: - client.export_tensorboard_time_series_data() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ExportTensorboardTimeSeriesDataRequest() - - -@pytest.mark.asyncio -async def test_export_tensorboard_time_series_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ExportTensorboardTimeSeriesDataRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_tensorboard_time_series_data), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - next_page_token='next_page_token_value', - )) - response = await client.export_tensorboard_time_series_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ExportTensorboardTimeSeriesDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ExportTensorboardTimeSeriesDataAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_export_tensorboard_time_series_data_async_from_dict(): - await test_export_tensorboard_time_series_data_async(request_type=dict) - - -def test_export_tensorboard_time_series_data_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest() - - request.tensorboard_time_series = 'tensorboard_time_series/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_tensorboard_time_series_data), - '__call__') as call: - call.return_value = tensorboard_service.ExportTensorboardTimeSeriesDataResponse() - client.export_tensorboard_time_series_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_time_series=tensorboard_time_series/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_export_tensorboard_time_series_data_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest() - - request.tensorboard_time_series = 'tensorboard_time_series/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_tensorboard_time_series_data), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ExportTensorboardTimeSeriesDataResponse()) - await client.export_tensorboard_time_series_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_time_series=tensorboard_time_series/value', - ) in kw['metadata'] - - -def test_export_tensorboard_time_series_data_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_tensorboard_time_series_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ExportTensorboardTimeSeriesDataResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.export_tensorboard_time_series_data( - tensorboard_time_series='tensorboard_time_series_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_time_series - mock_val = 'tensorboard_time_series_value' - assert arg == mock_val - - -def test_export_tensorboard_time_series_data_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.export_tensorboard_time_series_data( - tensorboard_service.ExportTensorboardTimeSeriesDataRequest(), - tensorboard_time_series='tensorboard_time_series_value', - ) - - -@pytest.mark.asyncio -async def test_export_tensorboard_time_series_data_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_tensorboard_time_series_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ExportTensorboardTimeSeriesDataResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ExportTensorboardTimeSeriesDataResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.export_tensorboard_time_series_data( - tensorboard_time_series='tensorboard_time_series_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_time_series - mock_val = 'tensorboard_time_series_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_export_tensorboard_time_series_data_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.export_tensorboard_time_series_data( - tensorboard_service.ExportTensorboardTimeSeriesDataRequest(), - tensorboard_time_series='tensorboard_time_series_value', - ) - - -def test_export_tensorboard_time_series_data_pager(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_tensorboard_time_series_data), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[ - tensorboard_data.TimeSeriesDataPoint(), - tensorboard_data.TimeSeriesDataPoint(), - tensorboard_data.TimeSeriesDataPoint(), - ], - next_page_token='abc', - ), - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[], - next_page_token='def', - ), - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[ - tensorboard_data.TimeSeriesDataPoint(), - ], - next_page_token='ghi', - ), - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[ - tensorboard_data.TimeSeriesDataPoint(), - tensorboard_data.TimeSeriesDataPoint(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('tensorboard_time_series', ''), - )), - ) - pager = client.export_tensorboard_time_series_data(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, tensorboard_data.TimeSeriesDataPoint) - for i in results) - -def test_export_tensorboard_time_series_data_pages(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_tensorboard_time_series_data), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[ - tensorboard_data.TimeSeriesDataPoint(), - tensorboard_data.TimeSeriesDataPoint(), - tensorboard_data.TimeSeriesDataPoint(), - ], - next_page_token='abc', - ), - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[], - next_page_token='def', - ), - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[ - tensorboard_data.TimeSeriesDataPoint(), - ], - next_page_token='ghi', - ), - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[ - tensorboard_data.TimeSeriesDataPoint(), - tensorboard_data.TimeSeriesDataPoint(), - ], - ), - RuntimeError, - ) - pages = list(client.export_tensorboard_time_series_data(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_export_tensorboard_time_series_data_async_pager(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_tensorboard_time_series_data), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[ - tensorboard_data.TimeSeriesDataPoint(), - tensorboard_data.TimeSeriesDataPoint(), - tensorboard_data.TimeSeriesDataPoint(), - ], - next_page_token='abc', - ), - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[], - next_page_token='def', - ), - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[ - tensorboard_data.TimeSeriesDataPoint(), - ], - next_page_token='ghi', - ), - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[ - tensorboard_data.TimeSeriesDataPoint(), - tensorboard_data.TimeSeriesDataPoint(), - ], - ), - RuntimeError, - ) - async_pager = await client.export_tensorboard_time_series_data(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, tensorboard_data.TimeSeriesDataPoint) - for i in responses) - -@pytest.mark.asyncio -async def test_export_tensorboard_time_series_data_async_pages(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_tensorboard_time_series_data), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[ - tensorboard_data.TimeSeriesDataPoint(), - tensorboard_data.TimeSeriesDataPoint(), - tensorboard_data.TimeSeriesDataPoint(), - ], - next_page_token='abc', - ), - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[], - next_page_token='def', - ), - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[ - tensorboard_data.TimeSeriesDataPoint(), - ], - next_page_token='ghi', - ), - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[ - tensorboard_data.TimeSeriesDataPoint(), - tensorboard_data.TimeSeriesDataPoint(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.export_tensorboard_time_series_data(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.TensorboardServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.TensorboardServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = TensorboardServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. 
- transport = transports.TensorboardServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = TensorboardServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.TensorboardServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = TensorboardServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.TensorboardServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.TensorboardServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.TensorboardServiceGrpcTransport, - transports.TensorboardServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. 
- client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.TensorboardServiceGrpcTransport, - ) - -def test_tensorboard_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.TensorboardServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_tensorboard_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1.services.tensorboard_service.transports.TensorboardServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.TensorboardServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'create_tensorboard', - 'get_tensorboard', - 'update_tensorboard', - 'list_tensorboards', - 'delete_tensorboard', - 'create_tensorboard_experiment', - 'get_tensorboard_experiment', - 'update_tensorboard_experiment', - 'list_tensorboard_experiments', - 'delete_tensorboard_experiment', - 'create_tensorboard_run', - 'batch_create_tensorboard_runs', - 'get_tensorboard_run', - 'update_tensorboard_run', - 'list_tensorboard_runs', - 'delete_tensorboard_run', - 'batch_create_tensorboard_time_series', - 'create_tensorboard_time_series', - 'get_tensorboard_time_series', - 'update_tensorboard_time_series', - 'list_tensorboard_time_series', - 'delete_tensorboard_time_series', - 'batch_read_tensorboard_time_series_data', - 'read_tensorboard_time_series_data', - 'read_tensorboard_blob_data', - 'write_tensorboard_experiment_data', - 'write_tensorboard_run_data', - 'export_tensorboard_time_series_data', - ) - for method in methods: - with pytest.raises(NotImplementedError): - 
getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -def test_tensorboard_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.tensorboard_service.transports.TensorboardServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.TensorboardServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - 'https://www.googleapis.com/auth/cloud-platform.read-only', -), - quota_project_id="octopus", - ) - - -def test_tensorboard_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.tensorboard_service.transports.TensorboardServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.TensorboardServiceTransport() - adc.assert_called_once() - - -def test_tensorboard_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - TensorboardServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - 'https://www.googleapis.com/auth/cloud-platform.read-only', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.TensorboardServiceGrpcTransport, - transports.TensorboardServiceGrpcAsyncIOTransport, - ], -) -def test_tensorboard_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/cloud-platform.read-only',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.TensorboardServiceGrpcTransport, grpc_helpers), - (transports.TensorboardServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_tensorboard_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - 'https://www.googleapis.com/auth/cloud-platform.read-only', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.TensorboardServiceGrpcTransport, transports.TensorboardServiceGrpcAsyncIOTransport]) -def test_tensorboard_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. 
- with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_tensorboard_service_host_no_port(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_tensorboard_service_host_with_port(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_tensorboard_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.TensorboardServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_tensorboard_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.TensorboardServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.TensorboardServiceGrpcTransport, transports.TensorboardServiceGrpcAsyncIOTransport]) -def test_tensorboard_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# 
removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.TensorboardServiceGrpcTransport, transports.TensorboardServiceGrpcAsyncIOTransport]) -def test_tensorboard_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_tensorboard_service_grpc_lro_client(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. 
- assert transport.operations_client is transport.operations_client - - -def test_tensorboard_service_grpc_lro_async_client(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_tensorboard_path(): - project = "squid" - location = "clam" - tensorboard = "whelk" - expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(project=project, location=location, tensorboard=tensorboard, ) - actual = TensorboardServiceClient.tensorboard_path(project, location, tensorboard) - assert expected == actual - - -def test_parse_tensorboard_path(): - expected = { - "project": "octopus", - "location": "oyster", - "tensorboard": "nudibranch", - } - path = TensorboardServiceClient.tensorboard_path(**expected) - - # Check that the path construction is reversible. 
- actual = TensorboardServiceClient.parse_tensorboard_path(path) - assert expected == actual - -def test_tensorboard_experiment_path(): - project = "cuttlefish" - location = "mussel" - tensorboard = "winkle" - experiment = "nautilus" - expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, ) - actual = TensorboardServiceClient.tensorboard_experiment_path(project, location, tensorboard, experiment) - assert expected == actual - - -def test_parse_tensorboard_experiment_path(): - expected = { - "project": "scallop", - "location": "abalone", - "tensorboard": "squid", - "experiment": "clam", - } - path = TensorboardServiceClient.tensorboard_experiment_path(**expected) - - # Check that the path construction is reversible. - actual = TensorboardServiceClient.parse_tensorboard_experiment_path(path) - assert expected == actual - -def test_tensorboard_run_path(): - project = "whelk" - location = "octopus" - tensorboard = "oyster" - experiment = "nudibranch" - run = "cuttlefish" - expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, run=run, ) - actual = TensorboardServiceClient.tensorboard_run_path(project, location, tensorboard, experiment, run) - assert expected == actual - - -def test_parse_tensorboard_run_path(): - expected = { - "project": "mussel", - "location": "winkle", - "tensorboard": "nautilus", - "experiment": "scallop", - "run": "abalone", - } - path = TensorboardServiceClient.tensorboard_run_path(**expected) - - # Check that the path construction is reversible. 
- actual = TensorboardServiceClient.parse_tensorboard_run_path(path) - assert expected == actual - -def test_tensorboard_time_series_path(): - project = "squid" - location = "clam" - tensorboard = "whelk" - experiment = "octopus" - run = "oyster" - time_series = "nudibranch" - expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, run=run, time_series=time_series, ) - actual = TensorboardServiceClient.tensorboard_time_series_path(project, location, tensorboard, experiment, run, time_series) - assert expected == actual - - -def test_parse_tensorboard_time_series_path(): - expected = { - "project": "cuttlefish", - "location": "mussel", - "tensorboard": "winkle", - "experiment": "nautilus", - "run": "scallop", - "time_series": "abalone", - } - path = TensorboardServiceClient.tensorboard_time_series_path(**expected) - - # Check that the path construction is reversible. - actual = TensorboardServiceClient.parse_tensorboard_time_series_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "squid" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = TensorboardServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "clam", - } - path = TensorboardServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. 
- actual = TensorboardServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "whelk" - expected = "folders/{folder}".format(folder=folder, ) - actual = TensorboardServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "octopus", - } - path = TensorboardServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = TensorboardServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "oyster" - expected = "organizations/{organization}".format(organization=organization, ) - actual = TensorboardServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nudibranch", - } - path = TensorboardServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = TensorboardServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "cuttlefish" - expected = "projects/{project}".format(project=project, ) - actual = TensorboardServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "mussel", - } - path = TensorboardServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. 
- actual = TensorboardServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "winkle" - location = "nautilus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = TensorboardServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "scallop", - "location": "abalone", - } - path = TensorboardServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. - actual = TensorboardServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.TensorboardServiceTransport, '_prep_wrapped_messages') as prep: - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.TensorboardServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = TensorboardServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - 
transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. - with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_vizier_service.py b/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_vizier_service.py deleted file mode 100644 index 7b84e5e7c1..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/aiplatform_v1/test_vizier_service.py +++ /dev/null @@ -1,4630 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1.services.vizier_service import VizierServiceAsyncClient -from google.cloud.aiplatform_v1.services.vizier_service import VizierServiceClient -from google.cloud.aiplatform_v1.services.vizier_service import pagers -from google.cloud.aiplatform_v1.services.vizier_service import transports -from google.cloud.aiplatform_v1.types import study -from google.cloud.aiplatform_v1.types import study as gca_study -from google.cloud.aiplatform_v1.types import vizier_service -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert VizierServiceClient._get_default_mtls_endpoint(None) is None - assert VizierServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert VizierServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert VizierServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert VizierServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert VizierServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - VizierServiceClient, - VizierServiceAsyncClient, -]) -def test_vizier_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.VizierServiceGrpcTransport, "grpc"), - (transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_vizier_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = 
service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - VizierServiceClient, - VizierServiceAsyncClient, -]) -def test_vizier_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_vizier_service_client_get_transport_class(): - transport = VizierServiceClient.get_transport_class() - available_transports = [ - transports.VizierServiceGrpcTransport, - ] - assert transport in available_transports - - transport = VizierServiceClient.get_transport_class("grpc") - assert transport == transports.VizierServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc"), - (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(VizierServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceClient)) -@mock.patch.object(VizierServiceAsyncClient, "DEFAULT_ENDPOINT", 
modify_default_endpoint(VizierServiceAsyncClient)) -def test_vizier_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(VizierServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(VizierServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc", "true"), - (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc", "false"), - (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, 
"grpc_asyncio", "false"), -]) -@mock.patch.object(VizierServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceClient)) -@mock.patch.object(VizierServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_vizier_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc"), - (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_vizier_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc"), - (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_vizier_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_vizier_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1.services.vizier_service.transports.VizierServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = VizierServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_create_study(transport: str = 'grpc', request_type=vizier_service.CreateStudyRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_study), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = gca_study.Study( - name='name_value', - display_name='display_name_value', - state=gca_study.Study.State.ACTIVE, - inactive_reason='inactive_reason_value', - ) - response = client.create_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CreateStudyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_study.Study) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == gca_study.Study.State.ACTIVE - assert response.inactive_reason == 'inactive_reason_value' - - -def test_create_study_from_dict(): - test_create_study(request_type=dict) - - -def test_create_study_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_study), - '__call__') as call: - client.create_study() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CreateStudyRequest() - - -@pytest.mark.asyncio -async def test_create_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CreateStudyRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_study.Study( - name='name_value', - display_name='display_name_value', - state=gca_study.Study.State.ACTIVE, - inactive_reason='inactive_reason_value', - )) - response = await client.create_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CreateStudyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_study.Study) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == gca_study.Study.State.ACTIVE - assert response.inactive_reason == 'inactive_reason_value' - - -@pytest.mark.asyncio -async def test_create_study_async_from_dict(): - await test_create_study_async(request_type=dict) - - -def test_create_study_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.CreateStudyRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_study), - '__call__') as call: - call.return_value = gca_study.Study() - client.create_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_study_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.CreateStudyRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_study), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_study.Study()) - await client.create_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_study_flattened(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_study.Study() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_study( - parent='parent_value', - study=gca_study.Study(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].study - mock_val = gca_study.Study(name='name_value') - assert arg == mock_val - - -def test_create_study_flattened_error(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_study( - vizier_service.CreateStudyRequest(), - parent='parent_value', - study=gca_study.Study(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_study_flattened_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_study.Study() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_study.Study()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_study( - parent='parent_value', - study=gca_study.Study(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].study - mock_val = gca_study.Study(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_study_flattened_error_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_study( - vizier_service.CreateStudyRequest(), - parent='parent_value', - study=gca_study.Study(name='name_value'), - ) - - -def test_get_study(transport: str = 'grpc', request_type=vizier_service.GetStudyRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Study( - name='name_value', - display_name='display_name_value', - state=study.Study.State.ACTIVE, - inactive_reason='inactive_reason_value', - ) - response = client.get_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.GetStudyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, study.Study) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == study.Study.State.ACTIVE - assert response.inactive_reason == 'inactive_reason_value' - - -def test_get_study_from_dict(): - test_get_study(request_type=dict) - - -def test_get_study_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_study), - '__call__') as call: - client.get_study() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.GetStudyRequest() - - -@pytest.mark.asyncio -async def test_get_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.GetStudyRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Study( - name='name_value', - display_name='display_name_value', - state=study.Study.State.ACTIVE, - inactive_reason='inactive_reason_value', - )) - response = await client.get_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.GetStudyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, study.Study) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == study.Study.State.ACTIVE - assert response.inactive_reason == 'inactive_reason_value' - - -@pytest.mark.asyncio -async def test_get_study_async_from_dict(): - await test_get_study_async(request_type=dict) - - -def test_get_study_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = vizier_service.GetStudyRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_study), - '__call__') as call: - call.return_value = study.Study() - client.get_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_study_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.GetStudyRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_study), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study()) - await client.get_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_study_flattened(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_study), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = study.Study() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_study( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_study_flattened_error(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_study( - vizier_service.GetStudyRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_study_flattened_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Study() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_study( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_study_flattened_error_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_study( - vizier_service.GetStudyRequest(), - name='name_value', - ) - - -def test_list_studies(transport: str = 'grpc', request_type=vizier_service.ListStudiesRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_studies), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = vizier_service.ListStudiesResponse( - next_page_token='next_page_token_value', - ) - response = client.list_studies(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.ListStudiesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListStudiesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_studies_from_dict(): - test_list_studies(request_type=dict) - - -def test_list_studies_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_studies), - '__call__') as call: - client.list_studies() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.ListStudiesRequest() - - -@pytest.mark.asyncio -async def test_list_studies_async(transport: str = 'grpc_asyncio', request_type=vizier_service.ListStudiesRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_studies), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListStudiesResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_studies(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.ListStudiesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListStudiesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_studies_async_from_dict(): - await test_list_studies_async(request_type=dict) - - -def test_list_studies_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.ListStudiesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_studies), - '__call__') as call: - call.return_value = vizier_service.ListStudiesResponse() - client.list_studies(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_studies_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.ListStudiesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_studies), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListStudiesResponse()) - await client.list_studies(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_studies_flattened(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_studies), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = vizier_service.ListStudiesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_studies( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_studies_flattened_error(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_studies( - vizier_service.ListStudiesRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_studies_flattened_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_studies), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = vizier_service.ListStudiesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListStudiesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_studies( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_studies_flattened_error_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_studies( - vizier_service.ListStudiesRequest(), - parent='parent_value', - ) - - -def test_list_studies_pager(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_studies), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - vizier_service.ListStudiesResponse( - studies=[ - study.Study(), - study.Study(), - study.Study(), - ], - next_page_token='abc', - ), - vizier_service.ListStudiesResponse( - studies=[], - next_page_token='def', - ), - vizier_service.ListStudiesResponse( - studies=[ - study.Study(), - ], - next_page_token='ghi', - ), - vizier_service.ListStudiesResponse( - studies=[ - study.Study(), - study.Study(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_studies(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, study.Study) - for i in results) - -def test_list_studies_pages(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_studies), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - vizier_service.ListStudiesResponse( - studies=[ - study.Study(), - study.Study(), - study.Study(), - ], - next_page_token='abc', - ), - vizier_service.ListStudiesResponse( - studies=[], - next_page_token='def', - ), - vizier_service.ListStudiesResponse( - studies=[ - study.Study(), - ], - next_page_token='ghi', - ), - vizier_service.ListStudiesResponse( - studies=[ - study.Study(), - study.Study(), - ], - ), - RuntimeError, - ) - pages = list(client.list_studies(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_studies_async_pager(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_studies), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - vizier_service.ListStudiesResponse( - studies=[ - study.Study(), - study.Study(), - study.Study(), - ], - next_page_token='abc', - ), - vizier_service.ListStudiesResponse( - studies=[], - next_page_token='def', - ), - vizier_service.ListStudiesResponse( - studies=[ - study.Study(), - ], - next_page_token='ghi', - ), - vizier_service.ListStudiesResponse( - studies=[ - study.Study(), - study.Study(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_studies(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, study.Study) - for i in responses) - -@pytest.mark.asyncio -async def test_list_studies_async_pages(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_studies), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - vizier_service.ListStudiesResponse( - studies=[ - study.Study(), - study.Study(), - study.Study(), - ], - next_page_token='abc', - ), - vizier_service.ListStudiesResponse( - studies=[], - next_page_token='def', - ), - vizier_service.ListStudiesResponse( - studies=[ - study.Study(), - ], - next_page_token='ghi', - ), - vizier_service.ListStudiesResponse( - studies=[ - study.Study(), - study.Study(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_studies(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_study(transport: str = 'grpc', request_type=vizier_service.DeleteStudyRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.DeleteStudyRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_study_from_dict(): - test_delete_study(request_type=dict) - - -def test_delete_study_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_study), - '__call__') as call: - client.delete_study() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.DeleteStudyRequest() - - -@pytest.mark.asyncio -async def test_delete_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.DeleteStudyRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.DeleteStudyRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_delete_study_async_from_dict(): - await test_delete_study_async(request_type=dict) - - -def test_delete_study_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = vizier_service.DeleteStudyRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_study), - '__call__') as call: - call.return_value = None - client.delete_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_study_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.DeleteStudyRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_study), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_study_flattened(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_study), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_study( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_study_flattened_error(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_study( - vizier_service.DeleteStudyRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_study_flattened_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_study( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_study_flattened_error_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.delete_study( - vizier_service.DeleteStudyRequest(), - name='name_value', - ) - - -def test_lookup_study(transport: str = 'grpc', request_type=vizier_service.LookupStudyRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.lookup_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Study( - name='name_value', - display_name='display_name_value', - state=study.Study.State.ACTIVE, - inactive_reason='inactive_reason_value', - ) - response = client.lookup_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.LookupStudyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, study.Study) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == study.Study.State.ACTIVE - assert response.inactive_reason == 'inactive_reason_value' - - -def test_lookup_study_from_dict(): - test_lookup_study(request_type=dict) - - -def test_lookup_study_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.lookup_study), - '__call__') as call: - client.lookup_study() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.LookupStudyRequest() - - -@pytest.mark.asyncio -async def test_lookup_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.LookupStudyRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.lookup_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Study( - name='name_value', - display_name='display_name_value', - state=study.Study.State.ACTIVE, - inactive_reason='inactive_reason_value', - )) - response = await client.lookup_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.LookupStudyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, study.Study) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == study.Study.State.ACTIVE - assert response.inactive_reason == 'inactive_reason_value' - - -@pytest.mark.asyncio -async def test_lookup_study_async_from_dict(): - await test_lookup_study_async(request_type=dict) - - -def test_lookup_study_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. 
Set these to a non-empty value. - request = vizier_service.LookupStudyRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.lookup_study), - '__call__') as call: - call.return_value = study.Study() - client.lookup_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_lookup_study_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.LookupStudyRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.lookup_study), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study()) - await client.lookup_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_lookup_study_flattened(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.lookup_study), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = study.Study() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.lookup_study( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_lookup_study_flattened_error(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.lookup_study( - vizier_service.LookupStudyRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_lookup_study_flattened_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.lookup_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Study() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.lookup_study( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_lookup_study_flattened_error_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.lookup_study( - vizier_service.LookupStudyRequest(), - parent='parent_value', - ) - - -def test_suggest_trials(transport: str = 'grpc', request_type=vizier_service.SuggestTrialsRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.suggest_trials), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.suggest_trials(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.SuggestTrialsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_suggest_trials_from_dict(): - test_suggest_trials(request_type=dict) - - -def test_suggest_trials_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.suggest_trials), - '__call__') as call: - client.suggest_trials() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.SuggestTrialsRequest() - - -@pytest.mark.asyncio -async def test_suggest_trials_async(transport: str = 'grpc_asyncio', request_type=vizier_service.SuggestTrialsRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.suggest_trials), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.suggest_trials(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.SuggestTrialsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_suggest_trials_async_from_dict(): - await test_suggest_trials_async(request_type=dict) - - -def test_suggest_trials_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.SuggestTrialsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.suggest_trials), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.suggest_trials(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_suggest_trials_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.SuggestTrialsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.suggest_trials), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.suggest_trials(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_trial(transport: str = 'grpc', request_type=vizier_service.CreateTrialRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Trial( - name='name_value', - id='id_value', - state=study.Trial.State.REQUESTED, - client_id='client_id_value', - infeasible_reason='infeasible_reason_value', - custom_job='custom_job_value', - ) - response = client.create_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CreateTrialRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, study.Trial) - assert response.name == 'name_value' - assert response.id == 'id_value' - assert response.state == study.Trial.State.REQUESTED - assert response.client_id == 'client_id_value' - assert response.infeasible_reason == 'infeasible_reason_value' - assert response.custom_job == 'custom_job_value' - - -def test_create_trial_from_dict(): - test_create_trial(request_type=dict) - - -def test_create_trial_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_trial), - '__call__') as call: - client.create_trial() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CreateTrialRequest() - - -@pytest.mark.asyncio -async def test_create_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CreateTrialRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Trial( - name='name_value', - id='id_value', - state=study.Trial.State.REQUESTED, - client_id='client_id_value', - infeasible_reason='infeasible_reason_value', - custom_job='custom_job_value', - )) - response = await client.create_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CreateTrialRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, study.Trial) - assert response.name == 'name_value' - assert response.id == 'id_value' - assert response.state == study.Trial.State.REQUESTED - assert response.client_id == 'client_id_value' - assert response.infeasible_reason == 'infeasible_reason_value' - assert response.custom_job == 'custom_job_value' - - -@pytest.mark.asyncio -async def test_create_trial_async_from_dict(): - await test_create_trial_async(request_type=dict) - - -def test_create_trial_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.CreateTrialRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_trial), - '__call__') as call: - call.return_value = study.Trial() - client.create_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_trial_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.CreateTrialRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_trial), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) - await client.create_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_trial_flattened(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Trial() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_trial( - parent='parent_value', - trial=study.Trial(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].trial - mock_val = study.Trial(name='name_value') - assert arg == mock_val - - -def test_create_trial_flattened_error(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_trial( - vizier_service.CreateTrialRequest(), - parent='parent_value', - trial=study.Trial(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_trial_flattened_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Trial() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_trial( - parent='parent_value', - trial=study.Trial(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].trial - mock_val = study.Trial(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_trial_flattened_error_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_trial( - vizier_service.CreateTrialRequest(), - parent='parent_value', - trial=study.Trial(name='name_value'), - ) - - -def test_get_trial(transport: str = 'grpc', request_type=vizier_service.GetTrialRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Trial( - name='name_value', - id='id_value', - state=study.Trial.State.REQUESTED, - client_id='client_id_value', - infeasible_reason='infeasible_reason_value', - custom_job='custom_job_value', - ) - response = client.get_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.GetTrialRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, study.Trial) - assert response.name == 'name_value' - assert response.id == 'id_value' - assert response.state == study.Trial.State.REQUESTED - assert response.client_id == 'client_id_value' - assert response.infeasible_reason == 'infeasible_reason_value' - assert response.custom_job == 'custom_job_value' - - -def test_get_trial_from_dict(): - test_get_trial(request_type=dict) - - -def test_get_trial_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_trial), - '__call__') as call: - client.get_trial() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.GetTrialRequest() - - -@pytest.mark.asyncio -async def test_get_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.GetTrialRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Trial( - name='name_value', - id='id_value', - state=study.Trial.State.REQUESTED, - client_id='client_id_value', - infeasible_reason='infeasible_reason_value', - custom_job='custom_job_value', - )) - response = await client.get_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.GetTrialRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, study.Trial) - assert response.name == 'name_value' - assert response.id == 'id_value' - assert response.state == study.Trial.State.REQUESTED - assert response.client_id == 'client_id_value' - assert response.infeasible_reason == 'infeasible_reason_value' - assert response.custom_job == 'custom_job_value' - - -@pytest.mark.asyncio -async def test_get_trial_async_from_dict(): - await test_get_trial_async(request_type=dict) - - -def test_get_trial_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.GetTrialRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_trial), - '__call__') as call: - call.return_value = study.Trial() - client.get_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_trial_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.GetTrialRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_trial), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) - await client.get_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_trial_flattened(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Trial() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_trial( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_trial_flattened_error(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_trial( - vizier_service.GetTrialRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_trial_flattened_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Trial() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_trial( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_trial_flattened_error_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_trial( - vizier_service.GetTrialRequest(), - name='name_value', - ) - - -def test_list_trials(transport: str = 'grpc', request_type=vizier_service.ListTrialsRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_trials), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = vizier_service.ListTrialsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_trials(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.ListTrialsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTrialsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_trials_from_dict(): - test_list_trials(request_type=dict) - - -def test_list_trials_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_trials), - '__call__') as call: - client.list_trials() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.ListTrialsRequest() - - -@pytest.mark.asyncio -async def test_list_trials_async(transport: str = 'grpc_asyncio', request_type=vizier_service.ListTrialsRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_trials), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListTrialsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_trials(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.ListTrialsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTrialsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_trials_async_from_dict(): - await test_list_trials_async(request_type=dict) - - -def test_list_trials_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.ListTrialsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_trials), - '__call__') as call: - call.return_value = vizier_service.ListTrialsResponse() - client.list_trials(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_trials_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.ListTrialsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_trials), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListTrialsResponse()) - await client.list_trials(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_trials_flattened(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_trials), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = vizier_service.ListTrialsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_trials( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_trials_flattened_error(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_trials( - vizier_service.ListTrialsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_trials_flattened_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_trials), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = vizier_service.ListTrialsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListTrialsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_trials( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_trials_flattened_error_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_trials( - vizier_service.ListTrialsRequest(), - parent='parent_value', - ) - - -def test_list_trials_pager(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_trials), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - vizier_service.ListTrialsResponse( - trials=[ - study.Trial(), - study.Trial(), - study.Trial(), - ], - next_page_token='abc', - ), - vizier_service.ListTrialsResponse( - trials=[], - next_page_token='def', - ), - vizier_service.ListTrialsResponse( - trials=[ - study.Trial(), - ], - next_page_token='ghi', - ), - vizier_service.ListTrialsResponse( - trials=[ - study.Trial(), - study.Trial(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_trials(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, study.Trial) - for i in results) - -def test_list_trials_pages(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_trials), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - vizier_service.ListTrialsResponse( - trials=[ - study.Trial(), - study.Trial(), - study.Trial(), - ], - next_page_token='abc', - ), - vizier_service.ListTrialsResponse( - trials=[], - next_page_token='def', - ), - vizier_service.ListTrialsResponse( - trials=[ - study.Trial(), - ], - next_page_token='ghi', - ), - vizier_service.ListTrialsResponse( - trials=[ - study.Trial(), - study.Trial(), - ], - ), - RuntimeError, - ) - pages = list(client.list_trials(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_trials_async_pager(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_trials), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - vizier_service.ListTrialsResponse( - trials=[ - study.Trial(), - study.Trial(), - study.Trial(), - ], - next_page_token='abc', - ), - vizier_service.ListTrialsResponse( - trials=[], - next_page_token='def', - ), - vizier_service.ListTrialsResponse( - trials=[ - study.Trial(), - ], - next_page_token='ghi', - ), - vizier_service.ListTrialsResponse( - trials=[ - study.Trial(), - study.Trial(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_trials(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, study.Trial) - for i in responses) - -@pytest.mark.asyncio -async def test_list_trials_async_pages(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_trials), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - vizier_service.ListTrialsResponse( - trials=[ - study.Trial(), - study.Trial(), - study.Trial(), - ], - next_page_token='abc', - ), - vizier_service.ListTrialsResponse( - trials=[], - next_page_token='def', - ), - vizier_service.ListTrialsResponse( - trials=[ - study.Trial(), - ], - next_page_token='ghi', - ), - vizier_service.ListTrialsResponse( - trials=[ - study.Trial(), - study.Trial(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_trials(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_add_trial_measurement(transport: str = 'grpc', request_type=vizier_service.AddTrialMeasurementRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_trial_measurement), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Trial( - name='name_value', - id='id_value', - state=study.Trial.State.REQUESTED, - client_id='client_id_value', - infeasible_reason='infeasible_reason_value', - custom_job='custom_job_value', - ) - response = client.add_trial_measurement(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.AddTrialMeasurementRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, study.Trial) - assert response.name == 'name_value' - assert response.id == 'id_value' - assert response.state == study.Trial.State.REQUESTED - assert response.client_id == 'client_id_value' - assert response.infeasible_reason == 'infeasible_reason_value' - assert response.custom_job == 'custom_job_value' - - -def test_add_trial_measurement_from_dict(): - test_add_trial_measurement(request_type=dict) - - -def test_add_trial_measurement_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_trial_measurement), - '__call__') as call: - client.add_trial_measurement() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.AddTrialMeasurementRequest() - - -@pytest.mark.asyncio -async def test_add_trial_measurement_async(transport: str = 'grpc_asyncio', request_type=vizier_service.AddTrialMeasurementRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_trial_measurement), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Trial( - name='name_value', - id='id_value', - state=study.Trial.State.REQUESTED, - client_id='client_id_value', - infeasible_reason='infeasible_reason_value', - custom_job='custom_job_value', - )) - response = await client.add_trial_measurement(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.AddTrialMeasurementRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, study.Trial) - assert response.name == 'name_value' - assert response.id == 'id_value' - assert response.state == study.Trial.State.REQUESTED - assert response.client_id == 'client_id_value' - assert response.infeasible_reason == 'infeasible_reason_value' - assert response.custom_job == 'custom_job_value' - - -@pytest.mark.asyncio -async def test_add_trial_measurement_async_from_dict(): - await test_add_trial_measurement_async(request_type=dict) - - -def test_add_trial_measurement_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.AddTrialMeasurementRequest() - - request.trial_name = 'trial_name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_trial_measurement), - '__call__') as call: - call.return_value = study.Trial() - client.add_trial_measurement(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'trial_name=trial_name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_add_trial_measurement_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.AddTrialMeasurementRequest() - - request.trial_name = 'trial_name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_trial_measurement), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) - await client.add_trial_measurement(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'trial_name=trial_name/value', - ) in kw['metadata'] - - -def test_complete_trial(transport: str = 'grpc', request_type=vizier_service.CompleteTrialRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.complete_trial), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = study.Trial( - name='name_value', - id='id_value', - state=study.Trial.State.REQUESTED, - client_id='client_id_value', - infeasible_reason='infeasible_reason_value', - custom_job='custom_job_value', - ) - response = client.complete_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CompleteTrialRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, study.Trial) - assert response.name == 'name_value' - assert response.id == 'id_value' - assert response.state == study.Trial.State.REQUESTED - assert response.client_id == 'client_id_value' - assert response.infeasible_reason == 'infeasible_reason_value' - assert response.custom_job == 'custom_job_value' - - -def test_complete_trial_from_dict(): - test_complete_trial(request_type=dict) - - -def test_complete_trial_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.complete_trial), - '__call__') as call: - client.complete_trial() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CompleteTrialRequest() - - -@pytest.mark.asyncio -async def test_complete_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CompleteTrialRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.complete_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Trial( - name='name_value', - id='id_value', - state=study.Trial.State.REQUESTED, - client_id='client_id_value', - infeasible_reason='infeasible_reason_value', - custom_job='custom_job_value', - )) - response = await client.complete_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CompleteTrialRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, study.Trial) - assert response.name == 'name_value' - assert response.id == 'id_value' - assert response.state == study.Trial.State.REQUESTED - assert response.client_id == 'client_id_value' - assert response.infeasible_reason == 'infeasible_reason_value' - assert response.custom_job == 'custom_job_value' - - -@pytest.mark.asyncio -async def test_complete_trial_async_from_dict(): - await test_complete_trial_async(request_type=dict) - - -def test_complete_trial_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.CompleteTrialRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.complete_trial), - '__call__') as call: - call.return_value = study.Trial() - client.complete_trial(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_complete_trial_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.CompleteTrialRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.complete_trial), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) - await client.complete_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_trial(transport: str = 'grpc', request_type=vizier_service.DeleteTrialRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_trial(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.DeleteTrialRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_trial_from_dict(): - test_delete_trial(request_type=dict) - - -def test_delete_trial_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_trial), - '__call__') as call: - client.delete_trial() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.DeleteTrialRequest() - - -@pytest.mark.asyncio -async def test_delete_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.DeleteTrialRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.DeleteTrialRequest() - - # Establish that the response is the type that we expect. 
- assert response is None - - -@pytest.mark.asyncio -async def test_delete_trial_async_from_dict(): - await test_delete_trial_async(request_type=dict) - - -def test_delete_trial_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.DeleteTrialRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_trial), - '__call__') as call: - call.return_value = None - client.delete_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_trial_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.DeleteTrialRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_trial), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_trial_flattened(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_trial( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_trial_flattened_error(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_trial( - vizier_service.DeleteTrialRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_trial_flattened_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_trial( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_trial_flattened_error_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_trial( - vizier_service.DeleteTrialRequest(), - name='name_value', - ) - - -def test_check_trial_early_stopping_state(transport: str = 'grpc', request_type=vizier_service.CheckTrialEarlyStoppingStateRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_trial_early_stopping_state), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.check_trial_early_stopping_state(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CheckTrialEarlyStoppingStateRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_check_trial_early_stopping_state_from_dict(): - test_check_trial_early_stopping_state(request_type=dict) - - -def test_check_trial_early_stopping_state_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_trial_early_stopping_state), - '__call__') as call: - client.check_trial_early_stopping_state() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CheckTrialEarlyStoppingStateRequest() - - -@pytest.mark.asyncio -async def test_check_trial_early_stopping_state_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CheckTrialEarlyStoppingStateRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_trial_early_stopping_state), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.check_trial_early_stopping_state(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CheckTrialEarlyStoppingStateRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_check_trial_early_stopping_state_async_from_dict(): - await test_check_trial_early_stopping_state_async(request_type=dict) - - -def test_check_trial_early_stopping_state_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.CheckTrialEarlyStoppingStateRequest() - - request.trial_name = 'trial_name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_trial_early_stopping_state), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.check_trial_early_stopping_state(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'trial_name=trial_name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_check_trial_early_stopping_state_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.CheckTrialEarlyStoppingStateRequest() - - request.trial_name = 'trial_name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.check_trial_early_stopping_state), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.check_trial_early_stopping_state(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'trial_name=trial_name/value', - ) in kw['metadata'] - - -def test_stop_trial(transport: str = 'grpc', request_type=vizier_service.StopTrialRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.stop_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Trial( - name='name_value', - id='id_value', - state=study.Trial.State.REQUESTED, - client_id='client_id_value', - infeasible_reason='infeasible_reason_value', - custom_job='custom_job_value', - ) - response = client.stop_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.StopTrialRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, study.Trial) - assert response.name == 'name_value' - assert response.id == 'id_value' - assert response.state == study.Trial.State.REQUESTED - assert response.client_id == 'client_id_value' - assert response.infeasible_reason == 'infeasible_reason_value' - assert response.custom_job == 'custom_job_value' - - -def test_stop_trial_from_dict(): - test_stop_trial(request_type=dict) - - -def test_stop_trial_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.stop_trial), - '__call__') as call: - client.stop_trial() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.StopTrialRequest() - - -@pytest.mark.asyncio -async def test_stop_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.StopTrialRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.stop_trial), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Trial( - name='name_value', - id='id_value', - state=study.Trial.State.REQUESTED, - client_id='client_id_value', - infeasible_reason='infeasible_reason_value', - custom_job='custom_job_value', - )) - response = await client.stop_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.StopTrialRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, study.Trial) - assert response.name == 'name_value' - assert response.id == 'id_value' - assert response.state == study.Trial.State.REQUESTED - assert response.client_id == 'client_id_value' - assert response.infeasible_reason == 'infeasible_reason_value' - assert response.custom_job == 'custom_job_value' - - -@pytest.mark.asyncio -async def test_stop_trial_async_from_dict(): - await test_stop_trial_async(request_type=dict) - - -def test_stop_trial_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.StopTrialRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.stop_trial), - '__call__') as call: - call.return_value = study.Trial() - client.stop_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_stop_trial_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.StopTrialRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.stop_trial), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) - await client.stop_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_list_optimal_trials(transport: str = 'grpc', request_type=vizier_service.ListOptimalTrialsRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_optimal_trials), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = vizier_service.ListOptimalTrialsResponse( - ) - response = client.list_optimal_trials(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.ListOptimalTrialsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, vizier_service.ListOptimalTrialsResponse) - - -def test_list_optimal_trials_from_dict(): - test_list_optimal_trials(request_type=dict) - - -def test_list_optimal_trials_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_optimal_trials), - '__call__') as call: - client.list_optimal_trials() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.ListOptimalTrialsRequest() - - -@pytest.mark.asyncio -async def test_list_optimal_trials_async(transport: str = 'grpc_asyncio', request_type=vizier_service.ListOptimalTrialsRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_optimal_trials), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListOptimalTrialsResponse( - )) - response = await client.list_optimal_trials(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.ListOptimalTrialsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, vizier_service.ListOptimalTrialsResponse) - - -@pytest.mark.asyncio -async def test_list_optimal_trials_async_from_dict(): - await test_list_optimal_trials_async(request_type=dict) - - -def test_list_optimal_trials_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.ListOptimalTrialsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_optimal_trials), - '__call__') as call: - call.return_value = vizier_service.ListOptimalTrialsResponse() - client.list_optimal_trials(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_optimal_trials_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.ListOptimalTrialsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_optimal_trials), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListOptimalTrialsResponse()) - await client.list_optimal_trials(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_optimal_trials_flattened(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_optimal_trials), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = vizier_service.ListOptimalTrialsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_optimal_trials( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_optimal_trials_flattened_error(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_optimal_trials( - vizier_service.ListOptimalTrialsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_optimal_trials_flattened_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_optimal_trials), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = vizier_service.ListOptimalTrialsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListOptimalTrialsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_optimal_trials( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_optimal_trials_flattened_error_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_optimal_trials( - vizier_service.ListOptimalTrialsRequest(), - parent='parent_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.VizierServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. 
- transport = transports.VizierServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = VizierServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.VizierServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = VizierServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.VizierServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = VizierServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.VizierServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.VizierServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.VizierServiceGrpcTransport, - transports.VizierServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. 
- client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.VizierServiceGrpcTransport, - ) - -def test_vizier_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.VizierServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_vizier_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1.services.vizier_service.transports.VizierServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.VizierServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'create_study', - 'get_study', - 'list_studies', - 'delete_study', - 'lookup_study', - 'suggest_trials', - 'create_trial', - 'get_trial', - 'list_trials', - 'add_trial_measurement', - 'complete_trial', - 'delete_trial', - 'check_trial_early_stopping_state', - 'stop_trial', - 'list_optimal_trials', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -def test_vizier_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1.services.vizier_service.transports.VizierServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value 
= None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.VizierServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_vizier_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1.services.vizier_service.transports.VizierServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.VizierServiceTransport() - adc.assert_called_once() - - -def test_vizier_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - VizierServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.VizierServiceGrpcTransport, - transports.VizierServiceGrpcAsyncIOTransport, - ], -) -def test_vizier_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.VizierServiceGrpcTransport, grpc_helpers), - (transports.VizierServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_vizier_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.VizierServiceGrpcTransport, transports.VizierServiceGrpcAsyncIOTransport]) -def test_vizier_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
- with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_vizier_service_host_no_port(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_vizier_service_host_with_port(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_vizier_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.VizierServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_vizier_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.VizierServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.VizierServiceGrpcTransport, transports.VizierServiceGrpcAsyncIOTransport]) -def test_vizier_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - 
credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.VizierServiceGrpcTransport, transports.VizierServiceGrpcAsyncIOTransport]) -def test_vizier_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_vizier_service_grpc_lro_client(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_vizier_service_grpc_lro_async_client(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_custom_job_path(): - project = "squid" - location = "clam" - custom_job = "whelk" - expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) - actual = VizierServiceClient.custom_job_path(project, location, custom_job) - assert expected == actual - - -def test_parse_custom_job_path(): - expected = { - "project": "octopus", - "location": "oyster", - "custom_job": "nudibranch", - } - path = VizierServiceClient.custom_job_path(**expected) - - # Check that the path construction is reversible. - actual = VizierServiceClient.parse_custom_job_path(path) - assert expected == actual - -def test_study_path(): - project = "cuttlefish" - location = "mussel" - study = "winkle" - expected = "projects/{project}/locations/{location}/studies/{study}".format(project=project, location=location, study=study, ) - actual = VizierServiceClient.study_path(project, location, study) - assert expected == actual - - -def test_parse_study_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "study": "abalone", - } - path = VizierServiceClient.study_path(**expected) - - # Check that the path construction is reversible. 
- actual = VizierServiceClient.parse_study_path(path) - assert expected == actual - -def test_trial_path(): - project = "squid" - location = "clam" - study = "whelk" - trial = "octopus" - expected = "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, ) - actual = VizierServiceClient.trial_path(project, location, study, trial) - assert expected == actual - - -def test_parse_trial_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - "study": "cuttlefish", - "trial": "mussel", - } - path = VizierServiceClient.trial_path(**expected) - - # Check that the path construction is reversible. - actual = VizierServiceClient.parse_trial_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "winkle" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = VizierServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "nautilus", - } - path = VizierServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = VizierServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "scallop" - expected = "folders/{folder}".format(folder=folder, ) - actual = VizierServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "abalone", - } - path = VizierServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. 
- actual = VizierServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "squid" - expected = "organizations/{organization}".format(organization=organization, ) - actual = VizierServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "clam", - } - path = VizierServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = VizierServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "whelk" - expected = "projects/{project}".format(project=project, ) - actual = VizierServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "octopus", - } - path = VizierServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = VizierServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "oyster" - location = "nudibranch" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = VizierServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "cuttlefish", - "location": "mussel", - } - path = VizierServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = VizierServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.VizierServiceTransport, '_prep_wrapped_messages') as prep: - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.VizierServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = VizierServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/definition_v1/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/definition_v1/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/definition_v1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/tests/unit/gapic/instance_v1/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/instance_v1/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/instance_v1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# diff --git a/owl-bot-staging/v1/tests/unit/gapic/params_v1/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/params_v1/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/params_v1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/tests/unit/gapic/prediction_v1/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/prediction_v1/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1/tests/unit/gapic/prediction_v1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# diff --git a/owl-bot-staging/v1beta1/.coveragerc b/owl-bot-staging/v1beta1/.coveragerc deleted file mode 100644 index a328166917..0000000000 --- a/owl-bot-staging/v1beta1/.coveragerc +++ /dev/null @@ -1,17 +0,0 @@ -[run] -branch = True - -[report] -show_missing = True -omit = - google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py -exclude_lines = - # Re-enable the standard pragma - pragma: NO COVER - # Ignore debug-only repr - def __repr__ - # Ignore pkg_resources exceptions. - # This is added at the module level as a safeguard for if someone - # generates the code and tries to run it without pip installing. This - # makes it virtually impossible to test properly. - except pkg_resources.DistributionNotFound diff --git a/owl-bot-staging/v1beta1/MANIFEST.in b/owl-bot-staging/v1beta1/MANIFEST.in deleted file mode 100644 index e386e05fec..0000000000 --- a/owl-bot-staging/v1beta1/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -recursive-include google/cloud/aiplatform/v1beta1/schema/trainingjob/definition *.py -recursive-include google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1 *.py diff --git a/owl-bot-staging/v1beta1/README.rst b/owl-bot-staging/v1beta1/README.rst deleted file mode 100644 index c0e4d26d4d..0000000000 --- a/owl-bot-staging/v1beta1/README.rst +++ /dev/null @@ -1,49 +0,0 @@ -Python Client for Google Cloud Aiplatform V1beta1 Schema Trainingjob Definition API -================================================= - -Quick Start ------------ - -In order to use this library, you first need to go through the following steps: - -1. `Select or create a Cloud Platform project.`_ -2. `Enable billing for your project.`_ -3. Enable the Google Cloud Aiplatform V1beta1 Schema Trainingjob Definition API. -4. `Setup Authentication.`_ - -.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project -.. 
_Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project -.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html - -Installation -~~~~~~~~~~~~ - -Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to -create isolated Python environments. The basic problem it addresses is one of -dependencies and versions, and indirectly permissions. - -With `virtualenv`_, it's possible to install this library without needing system -install permissions, and without clashing with the installed system -dependencies. - -.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ - - -Mac/Linux -^^^^^^^^^ - -.. code-block:: console - - python3 -m venv - source /bin/activate - /bin/pip install /path/to/library - - -Windows -^^^^^^^ - -.. code-block:: console - - python3 -m venv - \Scripts\activate - \Scripts\pip.exe install \path\to\library diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/dataset_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/dataset_service.rst deleted file mode 100644 index 43fad30e55..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/dataset_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -DatasetService --------------------------------- - -.. automodule:: google.cloud.aiplatform_v1beta1.services.dataset_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1beta1.services.dataset_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/endpoint_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/endpoint_service.rst deleted file mode 100644 index 022799a059..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/endpoint_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -EndpointService ---------------------------------- - -.. 
automodule:: google.cloud.aiplatform_v1beta1.services.endpoint_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1beta1.services.endpoint_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_online_serving_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_online_serving_service.rst deleted file mode 100644 index 21013eb751..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_online_serving_service.rst +++ /dev/null @@ -1,6 +0,0 @@ -FeaturestoreOnlineServingService --------------------------------------------------- - -.. automodule:: google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_service.rst deleted file mode 100644 index 8d2f33039e..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/featurestore_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -FeaturestoreService -------------------------------------- - -.. automodule:: google.cloud.aiplatform_v1beta1.services.featurestore_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_endpoint_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_endpoint_service.rst deleted file mode 100644 index 65c910142e..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_endpoint_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -IndexEndpointService --------------------------------------- - -.. automodule:: google.cloud.aiplatform_v1beta1.services.index_endpoint_service - :members: - :inherited-members: - -.. 
automodule:: google.cloud.aiplatform_v1beta1.services.index_endpoint_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_service.rst deleted file mode 100644 index 96afb58594..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/index_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -IndexService ------------------------------- - -.. automodule:: google.cloud.aiplatform_v1beta1.services.index_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1beta1.services.index_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/job_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/job_service.rst deleted file mode 100644 index 46b1268166..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/job_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -JobService ----------------------------- - -.. automodule:: google.cloud.aiplatform_v1beta1.services.job_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1beta1.services.job_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/metadata_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/metadata_service.rst deleted file mode 100644 index 3c07725687..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/metadata_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -MetadataService ---------------------------------- - -.. automodule:: google.cloud.aiplatform_v1beta1.services.metadata_service - :members: - :inherited-members: - -.. 
automodule:: google.cloud.aiplatform_v1beta1.services.metadata_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/migration_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/migration_service.rst deleted file mode 100644 index be164d59ba..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/migration_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -MigrationService ----------------------------------- - -.. automodule:: google.cloud.aiplatform_v1beta1.services.migration_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1beta1.services.migration_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/model_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/model_service.rst deleted file mode 100644 index be68f796b0..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/model_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -ModelService ------------------------------- - -.. automodule:: google.cloud.aiplatform_v1beta1.services.model_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1beta1.services.model_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/pipeline_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/pipeline_service.rst deleted file mode 100644 index 1180370863..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/pipeline_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -PipelineService ---------------------------------- - -.. automodule:: google.cloud.aiplatform_v1beta1.services.pipeline_service - :members: - :inherited-members: - -.. 
automodule:: google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/prediction_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/prediction_service.rst deleted file mode 100644 index 03c1150df0..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/prediction_service.rst +++ /dev/null @@ -1,6 +0,0 @@ -PredictionService ------------------------------------ - -.. automodule:: google.cloud.aiplatform_v1beta1.services.prediction_service - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/services.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/services.rst deleted file mode 100644 index 490112c7d9..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/services.rst +++ /dev/null @@ -1,20 +0,0 @@ -Services for Google Cloud Aiplatform v1beta1 API -================================================ -.. toctree:: - :maxdepth: 2 - - dataset_service - endpoint_service - featurestore_online_serving_service - featurestore_service - index_endpoint_service - index_service - job_service - metadata_service - migration_service - model_service - pipeline_service - prediction_service - specialist_pool_service - tensorboard_service - vizier_service diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/specialist_pool_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/specialist_pool_service.rst deleted file mode 100644 index 2f13b68844..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/specialist_pool_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -SpecialistPoolService ---------------------------------------- - -.. automodule:: google.cloud.aiplatform_v1beta1.services.specialist_pool_service - :members: - :inherited-members: - -.. 
automodule:: google.cloud.aiplatform_v1beta1.services.specialist_pool_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/tensorboard_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/tensorboard_service.rst deleted file mode 100644 index 97d94feedc..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/tensorboard_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -TensorboardService ------------------------------------- - -.. automodule:: google.cloud.aiplatform_v1beta1.services.tensorboard_service - :members: - :inherited-members: - -.. automodule:: google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/types.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/types.rst deleted file mode 100644 index 770675f8ea..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Aiplatform v1beta1 API -============================================= - -.. automodule:: google.cloud.aiplatform_v1beta1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/vizier_service.rst b/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/vizier_service.rst deleted file mode 100644 index 8cad590f6c..0000000000 --- a/owl-bot-staging/v1beta1/docs/aiplatform_v1beta1/vizier_service.rst +++ /dev/null @@ -1,10 +0,0 @@ -VizierService -------------------------------- - -.. automodule:: google.cloud.aiplatform_v1beta1.services.vizier_service - :members: - :inherited-members: - -.. 
automodule:: google.cloud.aiplatform_v1beta1.services.vizier_service.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/conf.py b/owl-bot-staging/v1beta1/docs/conf.py deleted file mode 100644 index 7a174dcfc0..0000000000 --- a/owl-bot-staging/v1beta1/docs/conf.py +++ /dev/null @@ -1,376 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# -# google-cloud-aiplatform-v1beta1-schema-trainingjob-definition documentation build configuration file -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys -import os -import shlex - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath("..")) - -__version__ = "0.1.0" - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "1.6.3" - -# Add any Sphinx extension module names here, as strings. 
They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.autosummary", - "sphinx.ext.intersphinx", - "sphinx.ext.coverage", - "sphinx.ext.napoleon", - "sphinx.ext.todo", - "sphinx.ext.viewcode", -] - -# autodoc/autosummary flags -autoclass_content = "both" -autodoc_default_flags = ["members"] -autosummary_generate = True - - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# Allow markdown includes (so releases.md can include CHANGLEOG.md) -# http://www.sphinx-doc.org/en/master/markdown.html -source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -source_suffix = [".rst", ".md"] - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = "index" - -# General information about the project. -project = u"google-cloud-aiplatform-v1beta1-schema-trainingjob-definition" -copyright = u"2020, Google, LLC" -author = u"Google APIs" # TODO: autogenerate this bit - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The full version, including alpha/beta/rc tags. -release = __version__ -# The short X.Y version. -version = ".".join(release.split(".")[0:2]) - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. 
-# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ["_build"] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = True - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = "alabaster" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = { - "description": "Google Cloud Aiplatform V1beta1 Schema Trainingjob Client Libraries for Python", - "github_user": "googleapis", - "github_repo": "google-cloud-python", - "github_banner": True, - "font_family": "'Roboto', Georgia, sans", - "head_font_family": "'Roboto', Georgia, serif", - "code_font_family": "'Roboto Mono', 'Consolas', monospace", -} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. 
If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. 
Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# Now only 'ja' uses this config value -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = "google-cloud-aiplatform-v1beta1-schema-trainingjob-definition-doc" - -# -- Options for warnings ------------------------------------------------------ - - -suppress_warnings = [ - # Temporarily suppress this to avoid "more than one target found for - # cross-reference" warning, which are intractable for us to avoid while in - # a mono-repo. - # See https://github.com/sphinx-doc/sphinx/blob - # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 - "ref.python" -] - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - # 'preamble': '', - # Latex figure (float) alignment - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. 
List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ( - master_doc, - "google-cloud-aiplatform-v1beta1-schema-trainingjob-definition.tex", - u"google-cloud-aiplatform-v1beta1-schema-trainingjob-definition Documentation", - author, - "manual", - ) -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ( - master_doc, - "google-cloud-aiplatform-v1beta1-schema-trainingjob-definition", - u"Google Cloud Aiplatform V1beta1 Schema Trainingjob Definition Documentation", - [author], - 1, - ) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. 
List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - master_doc, - "google-cloud-aiplatform-v1beta1-schema-trainingjob-definition", - u"google-cloud-aiplatform-v1beta1-schema-trainingjob-definition Documentation", - author, - "google-cloud-aiplatform-v1beta1-schema-trainingjob-definition", - "GAPIC library for Google Cloud Aiplatform V1beta1 Schema Trainingjob Definition API", - "APIs", - ) -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = { - "python": ("http://python.readthedocs.org/en/latest/", None), - "gax": ("https://gax-python.readthedocs.org/en/latest/", None), - "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), - "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("http://requests.kennethreitz.org/en/stable/", None), - "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), - "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), -} - - -# Napoleon settings -napoleon_google_docstring = True -napoleon_numpy_docstring = True -napoleon_include_private_with_doc = False -napoleon_include_special_with_doc = True -napoleon_use_admonition_for_examples = False -napoleon_use_admonition_for_notes = False -napoleon_use_admonition_for_references = False -napoleon_use_ivar = False -napoleon_use_param = True -napoleon_use_rtype = True diff --git 
a/owl-bot-staging/v1beta1/docs/definition_v1beta1/services.rst b/owl-bot-staging/v1beta1/docs/definition_v1beta1/services.rst deleted file mode 100644 index 5f1ed5f2b7..0000000000 --- a/owl-bot-staging/v1beta1/docs/definition_v1beta1/services.rst +++ /dev/null @@ -1,4 +0,0 @@ -Services for Google Cloud Aiplatform V1beta1 Schema Trainingjob Definition v1beta1 API -====================================================================================== -.. toctree:: - :maxdepth: 2 diff --git a/owl-bot-staging/v1beta1/docs/definition_v1beta1/types.rst b/owl-bot-staging/v1beta1/docs/definition_v1beta1/types.rst deleted file mode 100644 index f4fe7a5301..0000000000 --- a/owl-bot-staging/v1beta1/docs/definition_v1beta1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Aiplatform V1beta1 Schema Trainingjob Definition v1beta1 API -=================================================================================== - -.. automodule:: google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1beta1/docs/index.rst b/owl-bot-staging/v1beta1/docs/index.rst deleted file mode 100644 index ec6c42c2ed..0000000000 --- a/owl-bot-staging/v1beta1/docs/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -API Reference -------------- -.. toctree:: - :maxdepth: 2 - - definition_v1beta1/services - definition_v1beta1/types diff --git a/owl-bot-staging/v1beta1/docs/instance_v1beta1/services.rst b/owl-bot-staging/v1beta1/docs/instance_v1beta1/services.rst deleted file mode 100644 index 941dbcca59..0000000000 --- a/owl-bot-staging/v1beta1/docs/instance_v1beta1/services.rst +++ /dev/null @@ -1,4 +0,0 @@ -Services for Google Cloud Aiplatform V1beta1 Schema Predict Instance v1beta1 API -================================================================================ -.. 
toctree:: - :maxdepth: 2 diff --git a/owl-bot-staging/v1beta1/docs/instance_v1beta1/types.rst b/owl-bot-staging/v1beta1/docs/instance_v1beta1/types.rst deleted file mode 100644 index 7caa088065..0000000000 --- a/owl-bot-staging/v1beta1/docs/instance_v1beta1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Aiplatform V1beta1 Schema Predict Instance v1beta1 API -============================================================================= - -.. automodule:: google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1beta1/docs/params_v1beta1/services.rst b/owl-bot-staging/v1beta1/docs/params_v1beta1/services.rst deleted file mode 100644 index b3b897a0f4..0000000000 --- a/owl-bot-staging/v1beta1/docs/params_v1beta1/services.rst +++ /dev/null @@ -1,4 +0,0 @@ -Services for Google Cloud Aiplatform V1beta1 Schema Predict Params v1beta1 API -============================================================================== -.. toctree:: - :maxdepth: 2 diff --git a/owl-bot-staging/v1beta1/docs/params_v1beta1/types.rst b/owl-bot-staging/v1beta1/docs/params_v1beta1/types.rst deleted file mode 100644 index 722a1d8ba0..0000000000 --- a/owl-bot-staging/v1beta1/docs/params_v1beta1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Aiplatform V1beta1 Schema Predict Params v1beta1 API -=========================================================================== - -.. 
automodule:: google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1beta1/docs/prediction_v1beta1/services.rst b/owl-bot-staging/v1beta1/docs/prediction_v1beta1/services.rst deleted file mode 100644 index 6de5e17520..0000000000 --- a/owl-bot-staging/v1beta1/docs/prediction_v1beta1/services.rst +++ /dev/null @@ -1,4 +0,0 @@ -Services for Google Cloud Aiplatform V1beta1 Schema Predict Prediction v1beta1 API -================================================================================== -.. toctree:: - :maxdepth: 2 diff --git a/owl-bot-staging/v1beta1/docs/prediction_v1beta1/types.rst b/owl-bot-staging/v1beta1/docs/prediction_v1beta1/types.rst deleted file mode 100644 index b14182d6d7..0000000000 --- a/owl-bot-staging/v1beta1/docs/prediction_v1beta1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Cloud Aiplatform V1beta1 Schema Predict Prediction v1beta1 API -=============================================================================== - -.. automodule:: google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/__init__.py deleted file mode 100644 index 181fc767ef..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/__init__.py +++ /dev/null @@ -1,931 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# - -from google.cloud.aiplatform_v1beta1.services.dataset_service.client import DatasetServiceClient -from google.cloud.aiplatform_v1beta1.services.dataset_service.async_client import DatasetServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.endpoint_service.client import EndpointServiceClient -from google.cloud.aiplatform_v1beta1.services.endpoint_service.async_client import EndpointServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.client import FeaturestoreOnlineServingServiceClient -from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.async_client import FeaturestoreOnlineServingServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.featurestore_service.client import FeaturestoreServiceClient -from google.cloud.aiplatform_v1beta1.services.featurestore_service.async_client import FeaturestoreServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.index_endpoint_service.client import IndexEndpointServiceClient -from google.cloud.aiplatform_v1beta1.services.index_endpoint_service.async_client import IndexEndpointServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.index_service.client import IndexServiceClient -from google.cloud.aiplatform_v1beta1.services.index_service.async_client import IndexServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.job_service.client import JobServiceClient -from google.cloud.aiplatform_v1beta1.services.job_service.async_client import JobServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.metadata_service.client import MetadataServiceClient -from google.cloud.aiplatform_v1beta1.services.metadata_service.async_client import MetadataServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.migration_service.client import MigrationServiceClient -from 
google.cloud.aiplatform_v1beta1.services.migration_service.async_client import MigrationServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.model_service.client import ModelServiceClient -from google.cloud.aiplatform_v1beta1.services.model_service.async_client import ModelServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.pipeline_service.client import PipelineServiceClient -from google.cloud.aiplatform_v1beta1.services.pipeline_service.async_client import PipelineServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.prediction_service.client import PredictionServiceClient -from google.cloud.aiplatform_v1beta1.services.prediction_service.async_client import PredictionServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.specialist_pool_service.client import SpecialistPoolServiceClient -from google.cloud.aiplatform_v1beta1.services.specialist_pool_service.async_client import SpecialistPoolServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.tensorboard_service.client import TensorboardServiceClient -from google.cloud.aiplatform_v1beta1.services.tensorboard_service.async_client import TensorboardServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.vizier_service.client import VizierServiceClient -from google.cloud.aiplatform_v1beta1.services.vizier_service.async_client import VizierServiceAsyncClient - -from google.cloud.aiplatform_v1beta1.types.accelerator_type import AcceleratorType -from google.cloud.aiplatform_v1beta1.types.annotation import Annotation -from google.cloud.aiplatform_v1beta1.types.annotation_spec import AnnotationSpec -from google.cloud.aiplatform_v1beta1.types.artifact import Artifact -from google.cloud.aiplatform_v1beta1.types.batch_prediction_job import BatchPredictionJob -from google.cloud.aiplatform_v1beta1.types.completion_stats import CompletionStats -from google.cloud.aiplatform_v1beta1.types.context import Context -from 
google.cloud.aiplatform_v1beta1.types.custom_job import ContainerSpec -from google.cloud.aiplatform_v1beta1.types.custom_job import CustomJob -from google.cloud.aiplatform_v1beta1.types.custom_job import CustomJobSpec -from google.cloud.aiplatform_v1beta1.types.custom_job import PythonPackageSpec -from google.cloud.aiplatform_v1beta1.types.custom_job import Scheduling -from google.cloud.aiplatform_v1beta1.types.custom_job import WorkerPoolSpec -from google.cloud.aiplatform_v1beta1.types.data_item import DataItem -from google.cloud.aiplatform_v1beta1.types.data_labeling_job import ActiveLearningConfig -from google.cloud.aiplatform_v1beta1.types.data_labeling_job import DataLabelingJob -from google.cloud.aiplatform_v1beta1.types.data_labeling_job import SampleConfig -from google.cloud.aiplatform_v1beta1.types.data_labeling_job import TrainingConfig -from google.cloud.aiplatform_v1beta1.types.dataset import Dataset -from google.cloud.aiplatform_v1beta1.types.dataset import ExportDataConfig -from google.cloud.aiplatform_v1beta1.types.dataset import ImportDataConfig -from google.cloud.aiplatform_v1beta1.types.dataset_service import CreateDatasetOperationMetadata -from google.cloud.aiplatform_v1beta1.types.dataset_service import CreateDatasetRequest -from google.cloud.aiplatform_v1beta1.types.dataset_service import DeleteDatasetRequest -from google.cloud.aiplatform_v1beta1.types.dataset_service import ExportDataOperationMetadata -from google.cloud.aiplatform_v1beta1.types.dataset_service import ExportDataRequest -from google.cloud.aiplatform_v1beta1.types.dataset_service import ExportDataResponse -from google.cloud.aiplatform_v1beta1.types.dataset_service import GetAnnotationSpecRequest -from google.cloud.aiplatform_v1beta1.types.dataset_service import GetDatasetRequest -from google.cloud.aiplatform_v1beta1.types.dataset_service import ImportDataOperationMetadata -from google.cloud.aiplatform_v1beta1.types.dataset_service import ImportDataRequest -from 
google.cloud.aiplatform_v1beta1.types.dataset_service import ImportDataResponse -from google.cloud.aiplatform_v1beta1.types.dataset_service import ListAnnotationsRequest -from google.cloud.aiplatform_v1beta1.types.dataset_service import ListAnnotationsResponse -from google.cloud.aiplatform_v1beta1.types.dataset_service import ListDataItemsRequest -from google.cloud.aiplatform_v1beta1.types.dataset_service import ListDataItemsResponse -from google.cloud.aiplatform_v1beta1.types.dataset_service import ListDatasetsRequest -from google.cloud.aiplatform_v1beta1.types.dataset_service import ListDatasetsResponse -from google.cloud.aiplatform_v1beta1.types.dataset_service import UpdateDatasetRequest -from google.cloud.aiplatform_v1beta1.types.deployed_index_ref import DeployedIndexRef -from google.cloud.aiplatform_v1beta1.types.deployed_model_ref import DeployedModelRef -from google.cloud.aiplatform_v1beta1.types.encryption_spec import EncryptionSpec -from google.cloud.aiplatform_v1beta1.types.endpoint import DeployedModel -from google.cloud.aiplatform_v1beta1.types.endpoint import Endpoint -from google.cloud.aiplatform_v1beta1.types.endpoint import PrivateEndpoints -from google.cloud.aiplatform_v1beta1.types.endpoint_service import CreateEndpointOperationMetadata -from google.cloud.aiplatform_v1beta1.types.endpoint_service import CreateEndpointRequest -from google.cloud.aiplatform_v1beta1.types.endpoint_service import DeleteEndpointRequest -from google.cloud.aiplatform_v1beta1.types.endpoint_service import DeployModelOperationMetadata -from google.cloud.aiplatform_v1beta1.types.endpoint_service import DeployModelRequest -from google.cloud.aiplatform_v1beta1.types.endpoint_service import DeployModelResponse -from google.cloud.aiplatform_v1beta1.types.endpoint_service import GetEndpointRequest -from google.cloud.aiplatform_v1beta1.types.endpoint_service import ListEndpointsRequest -from google.cloud.aiplatform_v1beta1.types.endpoint_service import ListEndpointsResponse 
-from google.cloud.aiplatform_v1beta1.types.endpoint_service import UndeployModelOperationMetadata -from google.cloud.aiplatform_v1beta1.types.endpoint_service import UndeployModelRequest -from google.cloud.aiplatform_v1beta1.types.endpoint_service import UndeployModelResponse -from google.cloud.aiplatform_v1beta1.types.endpoint_service import UpdateEndpointRequest -from google.cloud.aiplatform_v1beta1.types.entity_type import EntityType -from google.cloud.aiplatform_v1beta1.types.env_var import EnvVar -from google.cloud.aiplatform_v1beta1.types.event import Event -from google.cloud.aiplatform_v1beta1.types.execution import Execution -from google.cloud.aiplatform_v1beta1.types.explanation import Attribution -from google.cloud.aiplatform_v1beta1.types.explanation import BlurBaselineConfig -from google.cloud.aiplatform_v1beta1.types.explanation import Explanation -from google.cloud.aiplatform_v1beta1.types.explanation import ExplanationMetadataOverride -from google.cloud.aiplatform_v1beta1.types.explanation import ExplanationParameters -from google.cloud.aiplatform_v1beta1.types.explanation import ExplanationSpec -from google.cloud.aiplatform_v1beta1.types.explanation import ExplanationSpecOverride -from google.cloud.aiplatform_v1beta1.types.explanation import FeatureNoiseSigma -from google.cloud.aiplatform_v1beta1.types.explanation import IntegratedGradientsAttribution -from google.cloud.aiplatform_v1beta1.types.explanation import ModelExplanation -from google.cloud.aiplatform_v1beta1.types.explanation import SampledShapleyAttribution -from google.cloud.aiplatform_v1beta1.types.explanation import Similarity -from google.cloud.aiplatform_v1beta1.types.explanation import SmoothGradConfig -from google.cloud.aiplatform_v1beta1.types.explanation import XraiAttribution -from google.cloud.aiplatform_v1beta1.types.explanation_metadata import ExplanationMetadata -from google.cloud.aiplatform_v1beta1.types.feature import Feature -from 
google.cloud.aiplatform_v1beta1.types.feature_monitoring_stats import FeatureStatsAnomaly -from google.cloud.aiplatform_v1beta1.types.feature_selector import FeatureSelector -from google.cloud.aiplatform_v1beta1.types.feature_selector import IdMatcher -from google.cloud.aiplatform_v1beta1.types.featurestore import Featurestore -from google.cloud.aiplatform_v1beta1.types.featurestore_monitoring import FeaturestoreMonitoringConfig -from google.cloud.aiplatform_v1beta1.types.featurestore_online_service import FeatureValue -from google.cloud.aiplatform_v1beta1.types.featurestore_online_service import FeatureValueList -from google.cloud.aiplatform_v1beta1.types.featurestore_online_service import ReadFeatureValuesRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_online_service import ReadFeatureValuesResponse -from google.cloud.aiplatform_v1beta1.types.featurestore_online_service import StreamingReadFeatureValuesRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import BatchCreateFeaturesOperationMetadata -from google.cloud.aiplatform_v1beta1.types.featurestore_service import BatchCreateFeaturesRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import BatchCreateFeaturesResponse -from google.cloud.aiplatform_v1beta1.types.featurestore_service import BatchReadFeatureValuesOperationMetadata -from google.cloud.aiplatform_v1beta1.types.featurestore_service import BatchReadFeatureValuesRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import BatchReadFeatureValuesResponse -from google.cloud.aiplatform_v1beta1.types.featurestore_service import CreateEntityTypeOperationMetadata -from google.cloud.aiplatform_v1beta1.types.featurestore_service import CreateEntityTypeRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import CreateFeatureOperationMetadata -from google.cloud.aiplatform_v1beta1.types.featurestore_service import CreateFeatureRequest -from 
google.cloud.aiplatform_v1beta1.types.featurestore_service import CreateFeaturestoreOperationMetadata -from google.cloud.aiplatform_v1beta1.types.featurestore_service import CreateFeaturestoreRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import DeleteEntityTypeRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import DeleteFeatureRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import DeleteFeaturestoreRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import DestinationFeatureSetting -from google.cloud.aiplatform_v1beta1.types.featurestore_service import ExportFeatureValuesOperationMetadata -from google.cloud.aiplatform_v1beta1.types.featurestore_service import ExportFeatureValuesRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import ExportFeatureValuesResponse -from google.cloud.aiplatform_v1beta1.types.featurestore_service import FeatureValueDestination -from google.cloud.aiplatform_v1beta1.types.featurestore_service import GetEntityTypeRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import GetFeatureRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import GetFeaturestoreRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import ImportFeatureValuesOperationMetadata -from google.cloud.aiplatform_v1beta1.types.featurestore_service import ImportFeatureValuesRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import ImportFeatureValuesResponse -from google.cloud.aiplatform_v1beta1.types.featurestore_service import ListEntityTypesRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import ListEntityTypesResponse -from google.cloud.aiplatform_v1beta1.types.featurestore_service import ListFeaturesRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import ListFeaturesResponse -from 
google.cloud.aiplatform_v1beta1.types.featurestore_service import ListFeaturestoresRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import ListFeaturestoresResponse -from google.cloud.aiplatform_v1beta1.types.featurestore_service import SearchFeaturesRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import SearchFeaturesResponse -from google.cloud.aiplatform_v1beta1.types.featurestore_service import UpdateEntityTypeRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import UpdateFeatureRequest -from google.cloud.aiplatform_v1beta1.types.featurestore_service import UpdateFeaturestoreOperationMetadata -from google.cloud.aiplatform_v1beta1.types.featurestore_service import UpdateFeaturestoreRequest -from google.cloud.aiplatform_v1beta1.types.hyperparameter_tuning_job import HyperparameterTuningJob -from google.cloud.aiplatform_v1beta1.types.index import Index -from google.cloud.aiplatform_v1beta1.types.index_endpoint import DeployedIndex -from google.cloud.aiplatform_v1beta1.types.index_endpoint import DeployedIndexAuthConfig -from google.cloud.aiplatform_v1beta1.types.index_endpoint import IndexEndpoint -from google.cloud.aiplatform_v1beta1.types.index_endpoint import IndexPrivateEndpoints -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import CreateIndexEndpointOperationMetadata -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import CreateIndexEndpointRequest -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import DeleteIndexEndpointRequest -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import DeployIndexOperationMetadata -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import DeployIndexRequest -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import DeployIndexResponse -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import GetIndexEndpointRequest -from 
google.cloud.aiplatform_v1beta1.types.index_endpoint_service import ListIndexEndpointsRequest -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import ListIndexEndpointsResponse -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import MutateDeployedIndexOperationMetadata -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import MutateDeployedIndexRequest -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import MutateDeployedIndexResponse -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import UndeployIndexOperationMetadata -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import UndeployIndexRequest -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import UndeployIndexResponse -from google.cloud.aiplatform_v1beta1.types.index_endpoint_service import UpdateIndexEndpointRequest -from google.cloud.aiplatform_v1beta1.types.index_service import CreateIndexOperationMetadata -from google.cloud.aiplatform_v1beta1.types.index_service import CreateIndexRequest -from google.cloud.aiplatform_v1beta1.types.index_service import DeleteIndexRequest -from google.cloud.aiplatform_v1beta1.types.index_service import GetIndexRequest -from google.cloud.aiplatform_v1beta1.types.index_service import ListIndexesRequest -from google.cloud.aiplatform_v1beta1.types.index_service import ListIndexesResponse -from google.cloud.aiplatform_v1beta1.types.index_service import NearestNeighborSearchOperationMetadata -from google.cloud.aiplatform_v1beta1.types.index_service import UpdateIndexOperationMetadata -from google.cloud.aiplatform_v1beta1.types.index_service import UpdateIndexRequest -from google.cloud.aiplatform_v1beta1.types.io import AvroSource -from google.cloud.aiplatform_v1beta1.types.io import BigQueryDestination -from google.cloud.aiplatform_v1beta1.types.io import BigQuerySource -from google.cloud.aiplatform_v1beta1.types.io import ContainerRegistryDestination -from 
google.cloud.aiplatform_v1beta1.types.io import CsvDestination -from google.cloud.aiplatform_v1beta1.types.io import CsvSource -from google.cloud.aiplatform_v1beta1.types.io import GcsDestination -from google.cloud.aiplatform_v1beta1.types.io import GcsSource -from google.cloud.aiplatform_v1beta1.types.io import TFRecordDestination -from google.cloud.aiplatform_v1beta1.types.job_service import CancelBatchPredictionJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import CancelCustomJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import CancelDataLabelingJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import CancelHyperparameterTuningJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import CreateBatchPredictionJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import CreateCustomJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import CreateDataLabelingJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import CreateHyperparameterTuningJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import CreateModelDeploymentMonitoringJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import DeleteBatchPredictionJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import DeleteCustomJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import DeleteDataLabelingJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import DeleteHyperparameterTuningJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import DeleteModelDeploymentMonitoringJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import GetBatchPredictionJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import GetCustomJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import GetDataLabelingJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import 
GetHyperparameterTuningJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import GetModelDeploymentMonitoringJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import ListBatchPredictionJobsRequest -from google.cloud.aiplatform_v1beta1.types.job_service import ListBatchPredictionJobsResponse -from google.cloud.aiplatform_v1beta1.types.job_service import ListCustomJobsRequest -from google.cloud.aiplatform_v1beta1.types.job_service import ListCustomJobsResponse -from google.cloud.aiplatform_v1beta1.types.job_service import ListDataLabelingJobsRequest -from google.cloud.aiplatform_v1beta1.types.job_service import ListDataLabelingJobsResponse -from google.cloud.aiplatform_v1beta1.types.job_service import ListHyperparameterTuningJobsRequest -from google.cloud.aiplatform_v1beta1.types.job_service import ListHyperparameterTuningJobsResponse -from google.cloud.aiplatform_v1beta1.types.job_service import ListModelDeploymentMonitoringJobsRequest -from google.cloud.aiplatform_v1beta1.types.job_service import ListModelDeploymentMonitoringJobsResponse -from google.cloud.aiplatform_v1beta1.types.job_service import PauseModelDeploymentMonitoringJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import ResumeModelDeploymentMonitoringJobRequest -from google.cloud.aiplatform_v1beta1.types.job_service import SearchModelDeploymentMonitoringStatsAnomaliesRequest -from google.cloud.aiplatform_v1beta1.types.job_service import SearchModelDeploymentMonitoringStatsAnomaliesResponse -from google.cloud.aiplatform_v1beta1.types.job_service import UpdateModelDeploymentMonitoringJobOperationMetadata -from google.cloud.aiplatform_v1beta1.types.job_service import UpdateModelDeploymentMonitoringJobRequest -from google.cloud.aiplatform_v1beta1.types.job_state import JobState -from google.cloud.aiplatform_v1beta1.types.lineage_subgraph import LineageSubgraph -from google.cloud.aiplatform_v1beta1.types.machine_resources import AutomaticResources -from 
google.cloud.aiplatform_v1beta1.types.machine_resources import AutoscalingMetricSpec -from google.cloud.aiplatform_v1beta1.types.machine_resources import BatchDedicatedResources -from google.cloud.aiplatform_v1beta1.types.machine_resources import DedicatedResources -from google.cloud.aiplatform_v1beta1.types.machine_resources import DiskSpec -from google.cloud.aiplatform_v1beta1.types.machine_resources import MachineSpec -from google.cloud.aiplatform_v1beta1.types.machine_resources import ResourcesConsumed -from google.cloud.aiplatform_v1beta1.types.manual_batch_tuning_parameters import ManualBatchTuningParameters -from google.cloud.aiplatform_v1beta1.types.metadata_schema import MetadataSchema -from google.cloud.aiplatform_v1beta1.types.metadata_service import AddContextArtifactsAndExecutionsRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import AddContextArtifactsAndExecutionsResponse -from google.cloud.aiplatform_v1beta1.types.metadata_service import AddContextChildrenRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import AddContextChildrenResponse -from google.cloud.aiplatform_v1beta1.types.metadata_service import AddExecutionEventsRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import AddExecutionEventsResponse -from google.cloud.aiplatform_v1beta1.types.metadata_service import CreateArtifactRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import CreateContextRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import CreateExecutionRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import CreateMetadataSchemaRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import CreateMetadataStoreOperationMetadata -from google.cloud.aiplatform_v1beta1.types.metadata_service import CreateMetadataStoreRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import DeleteArtifactRequest -from 
google.cloud.aiplatform_v1beta1.types.metadata_service import DeleteContextRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import DeleteExecutionRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import DeleteMetadataStoreOperationMetadata -from google.cloud.aiplatform_v1beta1.types.metadata_service import DeleteMetadataStoreRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import GetArtifactRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import GetContextRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import GetExecutionRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import GetMetadataSchemaRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import GetMetadataStoreRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import ListArtifactsRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import ListArtifactsResponse -from google.cloud.aiplatform_v1beta1.types.metadata_service import ListContextsRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import ListContextsResponse -from google.cloud.aiplatform_v1beta1.types.metadata_service import ListExecutionsRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import ListExecutionsResponse -from google.cloud.aiplatform_v1beta1.types.metadata_service import ListMetadataSchemasRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import ListMetadataSchemasResponse -from google.cloud.aiplatform_v1beta1.types.metadata_service import ListMetadataStoresRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import ListMetadataStoresResponse -from google.cloud.aiplatform_v1beta1.types.metadata_service import PurgeArtifactsMetadata -from google.cloud.aiplatform_v1beta1.types.metadata_service import PurgeArtifactsRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import 
PurgeArtifactsResponse -from google.cloud.aiplatform_v1beta1.types.metadata_service import PurgeContextsMetadata -from google.cloud.aiplatform_v1beta1.types.metadata_service import PurgeContextsRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import PurgeContextsResponse -from google.cloud.aiplatform_v1beta1.types.metadata_service import PurgeExecutionsMetadata -from google.cloud.aiplatform_v1beta1.types.metadata_service import PurgeExecutionsRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import PurgeExecutionsResponse -from google.cloud.aiplatform_v1beta1.types.metadata_service import QueryArtifactLineageSubgraphRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import QueryContextLineageSubgraphRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import QueryExecutionInputsAndOutputsRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import UpdateArtifactRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import UpdateContextRequest -from google.cloud.aiplatform_v1beta1.types.metadata_service import UpdateExecutionRequest -from google.cloud.aiplatform_v1beta1.types.metadata_store import MetadataStore -from google.cloud.aiplatform_v1beta1.types.migratable_resource import MigratableResource -from google.cloud.aiplatform_v1beta1.types.migration_service import BatchMigrateResourcesOperationMetadata -from google.cloud.aiplatform_v1beta1.types.migration_service import BatchMigrateResourcesRequest -from google.cloud.aiplatform_v1beta1.types.migration_service import BatchMigrateResourcesResponse -from google.cloud.aiplatform_v1beta1.types.migration_service import MigrateResourceRequest -from google.cloud.aiplatform_v1beta1.types.migration_service import MigrateResourceResponse -from google.cloud.aiplatform_v1beta1.types.migration_service import SearchMigratableResourcesRequest -from google.cloud.aiplatform_v1beta1.types.migration_service import 
SearchMigratableResourcesResponse -from google.cloud.aiplatform_v1beta1.types.model import Model -from google.cloud.aiplatform_v1beta1.types.model import ModelContainerSpec -from google.cloud.aiplatform_v1beta1.types.model import Port -from google.cloud.aiplatform_v1beta1.types.model import PredictSchemata -from google.cloud.aiplatform_v1beta1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringBigQueryTable -from google.cloud.aiplatform_v1beta1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringJob -from google.cloud.aiplatform_v1beta1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringObjectiveConfig -from google.cloud.aiplatform_v1beta1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringScheduleConfig -from google.cloud.aiplatform_v1beta1.types.model_deployment_monitoring_job import ModelMonitoringStatsAnomalies -from google.cloud.aiplatform_v1beta1.types.model_deployment_monitoring_job import ModelDeploymentMonitoringObjectiveType -from google.cloud.aiplatform_v1beta1.types.model_evaluation import ModelEvaluation -from google.cloud.aiplatform_v1beta1.types.model_evaluation_slice import ModelEvaluationSlice -from google.cloud.aiplatform_v1beta1.types.model_monitoring import ModelMonitoringAlertConfig -from google.cloud.aiplatform_v1beta1.types.model_monitoring import ModelMonitoringObjectiveConfig -from google.cloud.aiplatform_v1beta1.types.model_monitoring import SamplingStrategy -from google.cloud.aiplatform_v1beta1.types.model_monitoring import ThresholdConfig -from google.cloud.aiplatform_v1beta1.types.model_service import DeleteModelRequest -from google.cloud.aiplatform_v1beta1.types.model_service import ExportModelOperationMetadata -from google.cloud.aiplatform_v1beta1.types.model_service import ExportModelRequest -from google.cloud.aiplatform_v1beta1.types.model_service import ExportModelResponse -from google.cloud.aiplatform_v1beta1.types.model_service import 
GetModelEvaluationRequest -from google.cloud.aiplatform_v1beta1.types.model_service import GetModelEvaluationSliceRequest -from google.cloud.aiplatform_v1beta1.types.model_service import GetModelRequest -from google.cloud.aiplatform_v1beta1.types.model_service import ListModelEvaluationSlicesRequest -from google.cloud.aiplatform_v1beta1.types.model_service import ListModelEvaluationSlicesResponse -from google.cloud.aiplatform_v1beta1.types.model_service import ListModelEvaluationsRequest -from google.cloud.aiplatform_v1beta1.types.model_service import ListModelEvaluationsResponse -from google.cloud.aiplatform_v1beta1.types.model_service import ListModelsRequest -from google.cloud.aiplatform_v1beta1.types.model_service import ListModelsResponse -from google.cloud.aiplatform_v1beta1.types.model_service import UpdateModelRequest -from google.cloud.aiplatform_v1beta1.types.model_service import UploadModelOperationMetadata -from google.cloud.aiplatform_v1beta1.types.model_service import UploadModelRequest -from google.cloud.aiplatform_v1beta1.types.model_service import UploadModelResponse -from google.cloud.aiplatform_v1beta1.types.operation import DeleteOperationMetadata -from google.cloud.aiplatform_v1beta1.types.operation import GenericOperationMetadata -from google.cloud.aiplatform_v1beta1.types.pipeline_job import PipelineJob -from google.cloud.aiplatform_v1beta1.types.pipeline_job import PipelineJobDetail -from google.cloud.aiplatform_v1beta1.types.pipeline_job import PipelineTaskDetail -from google.cloud.aiplatform_v1beta1.types.pipeline_job import PipelineTaskExecutorDetail -from google.cloud.aiplatform_v1beta1.types.pipeline_service import CancelPipelineJobRequest -from google.cloud.aiplatform_v1beta1.types.pipeline_service import CancelTrainingPipelineRequest -from google.cloud.aiplatform_v1beta1.types.pipeline_service import CreatePipelineJobRequest -from google.cloud.aiplatform_v1beta1.types.pipeline_service import CreateTrainingPipelineRequest -from 
google.cloud.aiplatform_v1beta1.types.pipeline_service import DeletePipelineJobRequest -from google.cloud.aiplatform_v1beta1.types.pipeline_service import DeleteTrainingPipelineRequest -from google.cloud.aiplatform_v1beta1.types.pipeline_service import GetPipelineJobRequest -from google.cloud.aiplatform_v1beta1.types.pipeline_service import GetTrainingPipelineRequest -from google.cloud.aiplatform_v1beta1.types.pipeline_service import ListPipelineJobsRequest -from google.cloud.aiplatform_v1beta1.types.pipeline_service import ListPipelineJobsResponse -from google.cloud.aiplatform_v1beta1.types.pipeline_service import ListTrainingPipelinesRequest -from google.cloud.aiplatform_v1beta1.types.pipeline_service import ListTrainingPipelinesResponse -from google.cloud.aiplatform_v1beta1.types.pipeline_state import PipelineState -from google.cloud.aiplatform_v1beta1.types.prediction_service import ExplainRequest -from google.cloud.aiplatform_v1beta1.types.prediction_service import ExplainResponse -from google.cloud.aiplatform_v1beta1.types.prediction_service import PredictRequest -from google.cloud.aiplatform_v1beta1.types.prediction_service import PredictResponse -from google.cloud.aiplatform_v1beta1.types.prediction_service import RawPredictRequest -from google.cloud.aiplatform_v1beta1.types.specialist_pool import SpecialistPool -from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import CreateSpecialistPoolOperationMetadata -from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import CreateSpecialistPoolRequest -from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import DeleteSpecialistPoolRequest -from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import GetSpecialistPoolRequest -from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import ListSpecialistPoolsRequest -from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import ListSpecialistPoolsResponse -from 
google.cloud.aiplatform_v1beta1.types.specialist_pool_service import UpdateSpecialistPoolOperationMetadata -from google.cloud.aiplatform_v1beta1.types.specialist_pool_service import UpdateSpecialistPoolRequest -from google.cloud.aiplatform_v1beta1.types.study import Measurement -from google.cloud.aiplatform_v1beta1.types.study import Study -from google.cloud.aiplatform_v1beta1.types.study import StudySpec -from google.cloud.aiplatform_v1beta1.types.study import Trial -from google.cloud.aiplatform_v1beta1.types.tensorboard import Tensorboard -from google.cloud.aiplatform_v1beta1.types.tensorboard_data import Scalar -from google.cloud.aiplatform_v1beta1.types.tensorboard_data import TensorboardBlob -from google.cloud.aiplatform_v1beta1.types.tensorboard_data import TensorboardBlobSequence -from google.cloud.aiplatform_v1beta1.types.tensorboard_data import TensorboardTensor -from google.cloud.aiplatform_v1beta1.types.tensorboard_data import TimeSeriesData -from google.cloud.aiplatform_v1beta1.types.tensorboard_data import TimeSeriesDataPoint -from google.cloud.aiplatform_v1beta1.types.tensorboard_experiment import TensorboardExperiment -from google.cloud.aiplatform_v1beta1.types.tensorboard_run import TensorboardRun -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import BatchCreateTensorboardRunsRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import BatchCreateTensorboardRunsResponse -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import BatchCreateTensorboardTimeSeriesRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import BatchCreateTensorboardTimeSeriesResponse -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import BatchReadTensorboardTimeSeriesDataRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import BatchReadTensorboardTimeSeriesDataResponse -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import CreateTensorboardExperimentRequest 
-from google.cloud.aiplatform_v1beta1.types.tensorboard_service import CreateTensorboardOperationMetadata -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import CreateTensorboardRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import CreateTensorboardRunRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import CreateTensorboardTimeSeriesRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import DeleteTensorboardExperimentRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import DeleteTensorboardRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import DeleteTensorboardRunRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import DeleteTensorboardTimeSeriesRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ExportTensorboardTimeSeriesDataRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ExportTensorboardTimeSeriesDataResponse -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import GetTensorboardExperimentRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import GetTensorboardRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import GetTensorboardRunRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import GetTensorboardTimeSeriesRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardExperimentsRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardExperimentsResponse -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardRunsRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardRunsResponse -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardsRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service 
import ListTensorboardsResponse -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardTimeSeriesRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ListTensorboardTimeSeriesResponse -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ReadTensorboardBlobDataRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ReadTensorboardBlobDataResponse -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ReadTensorboardTimeSeriesDataRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import ReadTensorboardTimeSeriesDataResponse -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import UpdateTensorboardExperimentRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import UpdateTensorboardOperationMetadata -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import UpdateTensorboardRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import UpdateTensorboardRunRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import UpdateTensorboardTimeSeriesRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import WriteTensorboardExperimentDataRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import WriteTensorboardExperimentDataResponse -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import WriteTensorboardRunDataRequest -from google.cloud.aiplatform_v1beta1.types.tensorboard_service import WriteTensorboardRunDataResponse -from google.cloud.aiplatform_v1beta1.types.tensorboard_time_series import TensorboardTimeSeries -from google.cloud.aiplatform_v1beta1.types.training_pipeline import FilterSplit -from google.cloud.aiplatform_v1beta1.types.training_pipeline import FractionSplit -from google.cloud.aiplatform_v1beta1.types.training_pipeline import InputDataConfig -from 
google.cloud.aiplatform_v1beta1.types.training_pipeline import PredefinedSplit -from google.cloud.aiplatform_v1beta1.types.training_pipeline import StratifiedSplit -from google.cloud.aiplatform_v1beta1.types.training_pipeline import TimestampSplit -from google.cloud.aiplatform_v1beta1.types.training_pipeline import TrainingPipeline -from google.cloud.aiplatform_v1beta1.types.types import BoolArray -from google.cloud.aiplatform_v1beta1.types.types import DoubleArray -from google.cloud.aiplatform_v1beta1.types.types import Int64Array -from google.cloud.aiplatform_v1beta1.types.types import StringArray -from google.cloud.aiplatform_v1beta1.types.unmanaged_container_model import UnmanagedContainerModel -from google.cloud.aiplatform_v1beta1.types.user_action_reference import UserActionReference -from google.cloud.aiplatform_v1beta1.types.value import Value -from google.cloud.aiplatform_v1beta1.types.vizier_service import AddTrialMeasurementRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import CheckTrialEarlyStoppingStateMetatdata -from google.cloud.aiplatform_v1beta1.types.vizier_service import CheckTrialEarlyStoppingStateRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import CheckTrialEarlyStoppingStateResponse -from google.cloud.aiplatform_v1beta1.types.vizier_service import CompleteTrialRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import CreateStudyRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import CreateTrialRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import DeleteStudyRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import DeleteTrialRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import GetStudyRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import GetTrialRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import ListOptimalTrialsRequest -from 
google.cloud.aiplatform_v1beta1.types.vizier_service import ListOptimalTrialsResponse -from google.cloud.aiplatform_v1beta1.types.vizier_service import ListStudiesRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import ListStudiesResponse -from google.cloud.aiplatform_v1beta1.types.vizier_service import ListTrialsRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import ListTrialsResponse -from google.cloud.aiplatform_v1beta1.types.vizier_service import LookupStudyRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import StopTrialRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import SuggestTrialsMetadata -from google.cloud.aiplatform_v1beta1.types.vizier_service import SuggestTrialsRequest -from google.cloud.aiplatform_v1beta1.types.vizier_service import SuggestTrialsResponse - -__all__ = ('DatasetServiceClient', - 'DatasetServiceAsyncClient', - 'EndpointServiceClient', - 'EndpointServiceAsyncClient', - 'FeaturestoreOnlineServingServiceClient', - 'FeaturestoreOnlineServingServiceAsyncClient', - 'FeaturestoreServiceClient', - 'FeaturestoreServiceAsyncClient', - 'IndexEndpointServiceClient', - 'IndexEndpointServiceAsyncClient', - 'IndexServiceClient', - 'IndexServiceAsyncClient', - 'JobServiceClient', - 'JobServiceAsyncClient', - 'MetadataServiceClient', - 'MetadataServiceAsyncClient', - 'MigrationServiceClient', - 'MigrationServiceAsyncClient', - 'ModelServiceClient', - 'ModelServiceAsyncClient', - 'PipelineServiceClient', - 'PipelineServiceAsyncClient', - 'PredictionServiceClient', - 'PredictionServiceAsyncClient', - 'SpecialistPoolServiceClient', - 'SpecialistPoolServiceAsyncClient', - 'TensorboardServiceClient', - 'TensorboardServiceAsyncClient', - 'VizierServiceClient', - 'VizierServiceAsyncClient', - 'AcceleratorType', - 'Annotation', - 'AnnotationSpec', - 'Artifact', - 'BatchPredictionJob', - 'CompletionStats', - 'Context', - 'ContainerSpec', - 'CustomJob', - 'CustomJobSpec', - 
'PythonPackageSpec', - 'Scheduling', - 'WorkerPoolSpec', - 'DataItem', - 'ActiveLearningConfig', - 'DataLabelingJob', - 'SampleConfig', - 'TrainingConfig', - 'Dataset', - 'ExportDataConfig', - 'ImportDataConfig', - 'CreateDatasetOperationMetadata', - 'CreateDatasetRequest', - 'DeleteDatasetRequest', - 'ExportDataOperationMetadata', - 'ExportDataRequest', - 'ExportDataResponse', - 'GetAnnotationSpecRequest', - 'GetDatasetRequest', - 'ImportDataOperationMetadata', - 'ImportDataRequest', - 'ImportDataResponse', - 'ListAnnotationsRequest', - 'ListAnnotationsResponse', - 'ListDataItemsRequest', - 'ListDataItemsResponse', - 'ListDatasetsRequest', - 'ListDatasetsResponse', - 'UpdateDatasetRequest', - 'DeployedIndexRef', - 'DeployedModelRef', - 'EncryptionSpec', - 'DeployedModel', - 'Endpoint', - 'PrivateEndpoints', - 'CreateEndpointOperationMetadata', - 'CreateEndpointRequest', - 'DeleteEndpointRequest', - 'DeployModelOperationMetadata', - 'DeployModelRequest', - 'DeployModelResponse', - 'GetEndpointRequest', - 'ListEndpointsRequest', - 'ListEndpointsResponse', - 'UndeployModelOperationMetadata', - 'UndeployModelRequest', - 'UndeployModelResponse', - 'UpdateEndpointRequest', - 'EntityType', - 'EnvVar', - 'Event', - 'Execution', - 'Attribution', - 'BlurBaselineConfig', - 'Explanation', - 'ExplanationMetadataOverride', - 'ExplanationParameters', - 'ExplanationSpec', - 'ExplanationSpecOverride', - 'FeatureNoiseSigma', - 'IntegratedGradientsAttribution', - 'ModelExplanation', - 'SampledShapleyAttribution', - 'Similarity', - 'SmoothGradConfig', - 'XraiAttribution', - 'ExplanationMetadata', - 'Feature', - 'FeatureStatsAnomaly', - 'FeatureSelector', - 'IdMatcher', - 'Featurestore', - 'FeaturestoreMonitoringConfig', - 'FeatureValue', - 'FeatureValueList', - 'ReadFeatureValuesRequest', - 'ReadFeatureValuesResponse', - 'StreamingReadFeatureValuesRequest', - 'BatchCreateFeaturesOperationMetadata', - 'BatchCreateFeaturesRequest', - 'BatchCreateFeaturesResponse', - 
'BatchReadFeatureValuesOperationMetadata', - 'BatchReadFeatureValuesRequest', - 'BatchReadFeatureValuesResponse', - 'CreateEntityTypeOperationMetadata', - 'CreateEntityTypeRequest', - 'CreateFeatureOperationMetadata', - 'CreateFeatureRequest', - 'CreateFeaturestoreOperationMetadata', - 'CreateFeaturestoreRequest', - 'DeleteEntityTypeRequest', - 'DeleteFeatureRequest', - 'DeleteFeaturestoreRequest', - 'DestinationFeatureSetting', - 'ExportFeatureValuesOperationMetadata', - 'ExportFeatureValuesRequest', - 'ExportFeatureValuesResponse', - 'FeatureValueDestination', - 'GetEntityTypeRequest', - 'GetFeatureRequest', - 'GetFeaturestoreRequest', - 'ImportFeatureValuesOperationMetadata', - 'ImportFeatureValuesRequest', - 'ImportFeatureValuesResponse', - 'ListEntityTypesRequest', - 'ListEntityTypesResponse', - 'ListFeaturesRequest', - 'ListFeaturesResponse', - 'ListFeaturestoresRequest', - 'ListFeaturestoresResponse', - 'SearchFeaturesRequest', - 'SearchFeaturesResponse', - 'UpdateEntityTypeRequest', - 'UpdateFeatureRequest', - 'UpdateFeaturestoreOperationMetadata', - 'UpdateFeaturestoreRequest', - 'HyperparameterTuningJob', - 'Index', - 'DeployedIndex', - 'DeployedIndexAuthConfig', - 'IndexEndpoint', - 'IndexPrivateEndpoints', - 'CreateIndexEndpointOperationMetadata', - 'CreateIndexEndpointRequest', - 'DeleteIndexEndpointRequest', - 'DeployIndexOperationMetadata', - 'DeployIndexRequest', - 'DeployIndexResponse', - 'GetIndexEndpointRequest', - 'ListIndexEndpointsRequest', - 'ListIndexEndpointsResponse', - 'MutateDeployedIndexOperationMetadata', - 'MutateDeployedIndexRequest', - 'MutateDeployedIndexResponse', - 'UndeployIndexOperationMetadata', - 'UndeployIndexRequest', - 'UndeployIndexResponse', - 'UpdateIndexEndpointRequest', - 'CreateIndexOperationMetadata', - 'CreateIndexRequest', - 'DeleteIndexRequest', - 'GetIndexRequest', - 'ListIndexesRequest', - 'ListIndexesResponse', - 'NearestNeighborSearchOperationMetadata', - 'UpdateIndexOperationMetadata', - 
'UpdateIndexRequest', - 'AvroSource', - 'BigQueryDestination', - 'BigQuerySource', - 'ContainerRegistryDestination', - 'CsvDestination', - 'CsvSource', - 'GcsDestination', - 'GcsSource', - 'TFRecordDestination', - 'CancelBatchPredictionJobRequest', - 'CancelCustomJobRequest', - 'CancelDataLabelingJobRequest', - 'CancelHyperparameterTuningJobRequest', - 'CreateBatchPredictionJobRequest', - 'CreateCustomJobRequest', - 'CreateDataLabelingJobRequest', - 'CreateHyperparameterTuningJobRequest', - 'CreateModelDeploymentMonitoringJobRequest', - 'DeleteBatchPredictionJobRequest', - 'DeleteCustomJobRequest', - 'DeleteDataLabelingJobRequest', - 'DeleteHyperparameterTuningJobRequest', - 'DeleteModelDeploymentMonitoringJobRequest', - 'GetBatchPredictionJobRequest', - 'GetCustomJobRequest', - 'GetDataLabelingJobRequest', - 'GetHyperparameterTuningJobRequest', - 'GetModelDeploymentMonitoringJobRequest', - 'ListBatchPredictionJobsRequest', - 'ListBatchPredictionJobsResponse', - 'ListCustomJobsRequest', - 'ListCustomJobsResponse', - 'ListDataLabelingJobsRequest', - 'ListDataLabelingJobsResponse', - 'ListHyperparameterTuningJobsRequest', - 'ListHyperparameterTuningJobsResponse', - 'ListModelDeploymentMonitoringJobsRequest', - 'ListModelDeploymentMonitoringJobsResponse', - 'PauseModelDeploymentMonitoringJobRequest', - 'ResumeModelDeploymentMonitoringJobRequest', - 'SearchModelDeploymentMonitoringStatsAnomaliesRequest', - 'SearchModelDeploymentMonitoringStatsAnomaliesResponse', - 'UpdateModelDeploymentMonitoringJobOperationMetadata', - 'UpdateModelDeploymentMonitoringJobRequest', - 'JobState', - 'LineageSubgraph', - 'AutomaticResources', - 'AutoscalingMetricSpec', - 'BatchDedicatedResources', - 'DedicatedResources', - 'DiskSpec', - 'MachineSpec', - 'ResourcesConsumed', - 'ManualBatchTuningParameters', - 'MetadataSchema', - 'AddContextArtifactsAndExecutionsRequest', - 'AddContextArtifactsAndExecutionsResponse', - 'AddContextChildrenRequest', - 'AddContextChildrenResponse', - 
'AddExecutionEventsRequest', - 'AddExecutionEventsResponse', - 'CreateArtifactRequest', - 'CreateContextRequest', - 'CreateExecutionRequest', - 'CreateMetadataSchemaRequest', - 'CreateMetadataStoreOperationMetadata', - 'CreateMetadataStoreRequest', - 'DeleteArtifactRequest', - 'DeleteContextRequest', - 'DeleteExecutionRequest', - 'DeleteMetadataStoreOperationMetadata', - 'DeleteMetadataStoreRequest', - 'GetArtifactRequest', - 'GetContextRequest', - 'GetExecutionRequest', - 'GetMetadataSchemaRequest', - 'GetMetadataStoreRequest', - 'ListArtifactsRequest', - 'ListArtifactsResponse', - 'ListContextsRequest', - 'ListContextsResponse', - 'ListExecutionsRequest', - 'ListExecutionsResponse', - 'ListMetadataSchemasRequest', - 'ListMetadataSchemasResponse', - 'ListMetadataStoresRequest', - 'ListMetadataStoresResponse', - 'PurgeArtifactsMetadata', - 'PurgeArtifactsRequest', - 'PurgeArtifactsResponse', - 'PurgeContextsMetadata', - 'PurgeContextsRequest', - 'PurgeContextsResponse', - 'PurgeExecutionsMetadata', - 'PurgeExecutionsRequest', - 'PurgeExecutionsResponse', - 'QueryArtifactLineageSubgraphRequest', - 'QueryContextLineageSubgraphRequest', - 'QueryExecutionInputsAndOutputsRequest', - 'UpdateArtifactRequest', - 'UpdateContextRequest', - 'UpdateExecutionRequest', - 'MetadataStore', - 'MigratableResource', - 'BatchMigrateResourcesOperationMetadata', - 'BatchMigrateResourcesRequest', - 'BatchMigrateResourcesResponse', - 'MigrateResourceRequest', - 'MigrateResourceResponse', - 'SearchMigratableResourcesRequest', - 'SearchMigratableResourcesResponse', - 'Model', - 'ModelContainerSpec', - 'Port', - 'PredictSchemata', - 'ModelDeploymentMonitoringBigQueryTable', - 'ModelDeploymentMonitoringJob', - 'ModelDeploymentMonitoringObjectiveConfig', - 'ModelDeploymentMonitoringScheduleConfig', - 'ModelMonitoringStatsAnomalies', - 'ModelDeploymentMonitoringObjectiveType', - 'ModelEvaluation', - 'ModelEvaluationSlice', - 'ModelMonitoringAlertConfig', - 'ModelMonitoringObjectiveConfig', - 
'SamplingStrategy', - 'ThresholdConfig', - 'DeleteModelRequest', - 'ExportModelOperationMetadata', - 'ExportModelRequest', - 'ExportModelResponse', - 'GetModelEvaluationRequest', - 'GetModelEvaluationSliceRequest', - 'GetModelRequest', - 'ListModelEvaluationSlicesRequest', - 'ListModelEvaluationSlicesResponse', - 'ListModelEvaluationsRequest', - 'ListModelEvaluationsResponse', - 'ListModelsRequest', - 'ListModelsResponse', - 'UpdateModelRequest', - 'UploadModelOperationMetadata', - 'UploadModelRequest', - 'UploadModelResponse', - 'DeleteOperationMetadata', - 'GenericOperationMetadata', - 'PipelineJob', - 'PipelineJobDetail', - 'PipelineTaskDetail', - 'PipelineTaskExecutorDetail', - 'CancelPipelineJobRequest', - 'CancelTrainingPipelineRequest', - 'CreatePipelineJobRequest', - 'CreateTrainingPipelineRequest', - 'DeletePipelineJobRequest', - 'DeleteTrainingPipelineRequest', - 'GetPipelineJobRequest', - 'GetTrainingPipelineRequest', - 'ListPipelineJobsRequest', - 'ListPipelineJobsResponse', - 'ListTrainingPipelinesRequest', - 'ListTrainingPipelinesResponse', - 'PipelineState', - 'ExplainRequest', - 'ExplainResponse', - 'PredictRequest', - 'PredictResponse', - 'RawPredictRequest', - 'SpecialistPool', - 'CreateSpecialistPoolOperationMetadata', - 'CreateSpecialistPoolRequest', - 'DeleteSpecialistPoolRequest', - 'GetSpecialistPoolRequest', - 'ListSpecialistPoolsRequest', - 'ListSpecialistPoolsResponse', - 'UpdateSpecialistPoolOperationMetadata', - 'UpdateSpecialistPoolRequest', - 'Measurement', - 'Study', - 'StudySpec', - 'Trial', - 'Tensorboard', - 'Scalar', - 'TensorboardBlob', - 'TensorboardBlobSequence', - 'TensorboardTensor', - 'TimeSeriesData', - 'TimeSeriesDataPoint', - 'TensorboardExperiment', - 'TensorboardRun', - 'BatchCreateTensorboardRunsRequest', - 'BatchCreateTensorboardRunsResponse', - 'BatchCreateTensorboardTimeSeriesRequest', - 'BatchCreateTensorboardTimeSeriesResponse', - 'BatchReadTensorboardTimeSeriesDataRequest', - 
'BatchReadTensorboardTimeSeriesDataResponse', - 'CreateTensorboardExperimentRequest', - 'CreateTensorboardOperationMetadata', - 'CreateTensorboardRequest', - 'CreateTensorboardRunRequest', - 'CreateTensorboardTimeSeriesRequest', - 'DeleteTensorboardExperimentRequest', - 'DeleteTensorboardRequest', - 'DeleteTensorboardRunRequest', - 'DeleteTensorboardTimeSeriesRequest', - 'ExportTensorboardTimeSeriesDataRequest', - 'ExportTensorboardTimeSeriesDataResponse', - 'GetTensorboardExperimentRequest', - 'GetTensorboardRequest', - 'GetTensorboardRunRequest', - 'GetTensorboardTimeSeriesRequest', - 'ListTensorboardExperimentsRequest', - 'ListTensorboardExperimentsResponse', - 'ListTensorboardRunsRequest', - 'ListTensorboardRunsResponse', - 'ListTensorboardsRequest', - 'ListTensorboardsResponse', - 'ListTensorboardTimeSeriesRequest', - 'ListTensorboardTimeSeriesResponse', - 'ReadTensorboardBlobDataRequest', - 'ReadTensorboardBlobDataResponse', - 'ReadTensorboardTimeSeriesDataRequest', - 'ReadTensorboardTimeSeriesDataResponse', - 'UpdateTensorboardExperimentRequest', - 'UpdateTensorboardOperationMetadata', - 'UpdateTensorboardRequest', - 'UpdateTensorboardRunRequest', - 'UpdateTensorboardTimeSeriesRequest', - 'WriteTensorboardExperimentDataRequest', - 'WriteTensorboardExperimentDataResponse', - 'WriteTensorboardRunDataRequest', - 'WriteTensorboardRunDataResponse', - 'TensorboardTimeSeries', - 'FilterSplit', - 'FractionSplit', - 'InputDataConfig', - 'PredefinedSplit', - 'StratifiedSplit', - 'TimestampSplit', - 'TrainingPipeline', - 'BoolArray', - 'DoubleArray', - 'Int64Array', - 'StringArray', - 'UnmanagedContainerModel', - 'UserActionReference', - 'Value', - 'AddTrialMeasurementRequest', - 'CheckTrialEarlyStoppingStateMetatdata', - 'CheckTrialEarlyStoppingStateRequest', - 'CheckTrialEarlyStoppingStateResponse', - 'CompleteTrialRequest', - 'CreateStudyRequest', - 'CreateTrialRequest', - 'DeleteStudyRequest', - 'DeleteTrialRequest', - 'GetStudyRequest', - 'GetTrialRequest', - 
'ListOptimalTrialsRequest', - 'ListOptimalTrialsResponse', - 'ListStudiesRequest', - 'ListStudiesResponse', - 'ListTrialsRequest', - 'ListTrialsResponse', - 'LookupStudyRequest', - 'StopTrialRequest', - 'SuggestTrialsMetadata', - 'SuggestTrialsRequest', - 'SuggestTrialsResponse', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/py.typed deleted file mode 100644 index 228f1c51c6..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py deleted file mode 100644 index 5f9e065de0..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - - -from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_classification import ImageClassificationPredictionInstance -from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_object_detection import ImageObjectDetectionPredictionInstance -from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.image_segmentation import ImageSegmentationPredictionInstance -from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.text_classification import TextClassificationPredictionInstance -from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.text_extraction import TextExtractionPredictionInstance -from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.text_sentiment import TextSentimentPredictionInstance -from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.video_action_recognition import VideoActionRecognitionPredictionInstance -from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.video_classification import VideoClassificationPredictionInstance -from google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1.types.video_object_tracking import VideoObjectTrackingPredictionInstance - -__all__ = ('ImageClassificationPredictionInstance', - 'ImageObjectDetectionPredictionInstance', - 'ImageSegmentationPredictionInstance', - 'TextClassificationPredictionInstance', - 'TextExtractionPredictionInstance', - 'TextSentimentPredictionInstance', - 'VideoActionRecognitionPredictionInstance', - 'VideoClassificationPredictionInstance', - 'VideoObjectTrackingPredictionInstance', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed deleted file mode 100644 index 46ccbaf568..0000000000 --- 
a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1beta1-schema-predict-instance package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py deleted file mode 100644 index 41ab5407a7..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - - -from .types.image_classification import ImageClassificationPredictionInstance -from .types.image_object_detection import ImageObjectDetectionPredictionInstance -from .types.image_segmentation import ImageSegmentationPredictionInstance -from .types.text_classification import TextClassificationPredictionInstance -from .types.text_extraction import TextExtractionPredictionInstance -from .types.text_sentiment import TextSentimentPredictionInstance -from .types.video_action_recognition import VideoActionRecognitionPredictionInstance -from .types.video_classification import VideoClassificationPredictionInstance -from .types.video_object_tracking import VideoObjectTrackingPredictionInstance - -__all__ = ( -'ImageClassificationPredictionInstance', -'ImageObjectDetectionPredictionInstance', -'ImageSegmentationPredictionInstance', -'TextClassificationPredictionInstance', -'TextExtractionPredictionInstance', -'TextSentimentPredictionInstance', -'VideoActionRecognitionPredictionInstance', -'VideoClassificationPredictionInstance', -'VideoObjectTrackingPredictionInstance', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_metadata.json deleted file mode 100644 index 38379e8208..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_metadata.json +++ /dev/null @@ -1,7 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.aiplatform.v1beta1.schema.predict.instance_v1beta1", - "protoPackage": "google.cloud.aiplatform.v1beta1.schema.predict.instance", - "schema": "1.0" -} diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed 
b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed deleted file mode 100644 index 46ccbaf568..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1beta1-schema-predict-instance package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/services/__init__.py deleted file mode 100644 index 4de65971c2..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py deleted file mode 100644 index 80a5332604..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/__init__.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .image_classification import ( - ImageClassificationPredictionInstance, -) -from .image_object_detection import ( - ImageObjectDetectionPredictionInstance, -) -from .image_segmentation import ( - ImageSegmentationPredictionInstance, -) -from .text_classification import ( - TextClassificationPredictionInstance, -) -from .text_extraction import ( - TextExtractionPredictionInstance, -) -from .text_sentiment import ( - TextSentimentPredictionInstance, -) -from .video_action_recognition import ( - VideoActionRecognitionPredictionInstance, -) -from .video_classification import ( - VideoClassificationPredictionInstance, -) -from .video_object_tracking import ( - VideoObjectTrackingPredictionInstance, -) - -__all__ = ( - 'ImageClassificationPredictionInstance', - 'ImageObjectDetectionPredictionInstance', - 'ImageSegmentationPredictionInstance', - 'TextClassificationPredictionInstance', - 'TextExtractionPredictionInstance', - 'TextSentimentPredictionInstance', - 'VideoActionRecognitionPredictionInstance', - 'VideoClassificationPredictionInstance', - 'VideoObjectTrackingPredictionInstance', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py deleted file mode 100644 index 1f26544421..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_classification.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.instance', - manifest={ - 'ImageClassificationPredictionInstance', - }, -) - - -class ImageClassificationPredictionInstance(proto.Message): - r"""Prediction input format for Image Classification. - - Attributes: - content (str): - The image bytes or Cloud Storage URI to make - the prediction on. - mime_type (str): - The MIME type of the content of the image. - Only the images in below listed MIME types are - supported. - image/jpeg - - image/gif - - image/png - - image/webp - - image/bmp - - image/tiff - - image/vnd.microsoft.icon - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py deleted file mode 100644 index cfbe0848de..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_object_detection.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.instance', - manifest={ - 'ImageObjectDetectionPredictionInstance', - }, -) - - -class ImageObjectDetectionPredictionInstance(proto.Message): - r"""Prediction input format for Image Object Detection. - - Attributes: - content (str): - The image bytes or Cloud Storage URI to make - the prediction on. - mime_type (str): - The MIME type of the content of the image. - Only the images in below listed MIME types are - supported. - image/jpeg - - image/gif - - image/png - - image/webp - - image/bmp - - image/tiff - - image/vnd.microsoft.icon - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py deleted file mode 100644 index 359fe2b46d..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/image_segmentation.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.instance', - manifest={ - 'ImageSegmentationPredictionInstance', - }, -) - - -class ImageSegmentationPredictionInstance(proto.Message): - r"""Prediction input format for Image Segmentation. - - Attributes: - content (str): - The image bytes to make the predictions on. - mime_type (str): - The MIME type of the content of the image. - Only the images in below listed MIME types are - supported. - image/jpeg - - image/png - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py deleted file mode 100644 index 64d73cf6d5..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_classification.py +++ /dev/null @@ -1,49 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.instance', - manifest={ - 'TextClassificationPredictionInstance', - }, -) - - -class TextClassificationPredictionInstance(proto.Message): - r"""Prediction input format for Text Classification. - - Attributes: - content (str): - The text snippet to make the predictions on. - mime_type (str): - The MIME type of the text snippet. The - supported MIME types are listed below. - - text/plain - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py deleted file mode 100644 index 85b3f90db3..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_extraction.py +++ /dev/null @@ -1,62 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.instance', - manifest={ - 'TextExtractionPredictionInstance', - }, -) - - -class TextExtractionPredictionInstance(proto.Message): - r"""Prediction input format for Text Extraction. - - Attributes: - content (str): - The text snippet to make the predictions on. - mime_type (str): - The MIME type of the text snippet. The - supported MIME types are listed below. - - text/plain - key (str): - This field is only used for batch prediction. - If a key is provided, the batch prediction - result will by mapped to this key. If omitted, - then the batch prediction result will contain - the entire input instance. Vertex AI will not - check if keys in the request are duplicates, so - it is up to the caller to ensure the keys are - unique. 
- """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - key = proto.Field( - proto.STRING, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py deleted file mode 100644 index 12f3d1bbcb..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/text_sentiment.py +++ /dev/null @@ -1,49 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.instance', - manifest={ - 'TextSentimentPredictionInstance', - }, -) - - -class TextSentimentPredictionInstance(proto.Message): - r"""Prediction input format for Text Sentiment. - - Attributes: - content (str): - The text snippet to make the predictions on. - mime_type (str): - The MIME type of the text snippet. The - supported MIME types are listed below. 
- - text/plain - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py deleted file mode 100644 index 8a1c9df47f..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_action_recognition.py +++ /dev/null @@ -1,73 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.instance', - manifest={ - 'VideoActionRecognitionPredictionInstance', - }, -) - - -class VideoActionRecognitionPredictionInstance(proto.Message): - r"""Prediction input format for Video Action Recognition. - - Attributes: - content (str): - The Google Cloud Storage location of the - video on which to perform the prediction. - mime_type (str): - The MIME type of the content of the video. - Only the following are supported: video/mp4 - video/avi video/quicktime - time_segment_start (str): - The beginning, inclusive, of the video's time - segment on which to perform the prediction. 
- Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision. - time_segment_end (str): - The end, exclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision, and "inf" or "Infinity" - is allowed, which means the end of the video. - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.STRING, - number=3, - ) - time_segment_end = proto.Field( - proto.STRING, - number=4, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py deleted file mode 100644 index fee71d3894..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_classification.py +++ /dev/null @@ -1,73 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.instance', - manifest={ - 'VideoClassificationPredictionInstance', - }, -) - - -class VideoClassificationPredictionInstance(proto.Message): - r"""Prediction input format for Video Classification. - - Attributes: - content (str): - The Google Cloud Storage location of the - video on which to perform the prediction. - mime_type (str): - The MIME type of the content of the video. - Only the following are supported: video/mp4 - video/avi video/quicktime - time_segment_start (str): - The beginning, inclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision. - time_segment_end (str): - The end, exclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision, and "inf" or "Infinity" - is allowed, which means the end of the video. 
- """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.STRING, - number=3, - ) - time_segment_end = proto.Field( - proto.STRING, - number=4, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py deleted file mode 100644 index 19e15615f1..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/types/video_object_tracking.py +++ /dev/null @@ -1,73 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.instance', - manifest={ - 'VideoObjectTrackingPredictionInstance', - }, -) - - -class VideoObjectTrackingPredictionInstance(proto.Message): - r"""Prediction input format for Video Object Tracking. - - Attributes: - content (str): - The Google Cloud Storage location of the - video on which to perform the prediction. - mime_type (str): - The MIME type of the content of the video. 
- Only the following are supported: video/mp4 - video/avi video/quicktime - time_segment_start (str): - The beginning, inclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision. - time_segment_end (str): - The end, exclusive, of the video's time - segment on which to perform the prediction. - Expressed as a number of seconds as measured - from the start of the video, with "s" appended - at the end. Fractions are allowed, up to a - microsecond precision, and "inf" or "Infinity" - is allowed, which means the end of the video. - """ - - content = proto.Field( - proto.STRING, - number=1, - ) - mime_type = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.STRING, - number=3, - ) - time_segment_end = proto.Field( - proto.STRING, - number=4, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py deleted file mode 100644 index 464c39f26c..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# - - -from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_classification import ImageClassificationPredictionParams -from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_object_detection import ImageObjectDetectionPredictionParams -from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.image_segmentation import ImageSegmentationPredictionParams -from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.video_action_recognition import VideoActionRecognitionPredictionParams -from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.video_classification import VideoClassificationPredictionParams -from google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1.types.video_object_tracking import VideoObjectTrackingPredictionParams - -__all__ = ('ImageClassificationPredictionParams', - 'ImageObjectDetectionPredictionParams', - 'ImageSegmentationPredictionParams', - 'VideoActionRecognitionPredictionParams', - 'VideoClassificationPredictionParams', - 'VideoObjectTrackingPredictionParams', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed deleted file mode 100644 index acdcd7bc60..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1beta1-schema-predict-params package uses inline types. 
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py deleted file mode 100644 index 91b718b437..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - - -from .types.image_classification import ImageClassificationPredictionParams -from .types.image_object_detection import ImageObjectDetectionPredictionParams -from .types.image_segmentation import ImageSegmentationPredictionParams -from .types.video_action_recognition import VideoActionRecognitionPredictionParams -from .types.video_classification import VideoClassificationPredictionParams -from .types.video_object_tracking import VideoObjectTrackingPredictionParams - -__all__ = ( -'ImageClassificationPredictionParams', -'ImageObjectDetectionPredictionParams', -'ImageSegmentationPredictionParams', -'VideoActionRecognitionPredictionParams', -'VideoClassificationPredictionParams', -'VideoObjectTrackingPredictionParams', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_metadata.json deleted file mode 100644 index 6b925dd9dc..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_metadata.json +++ /dev/null @@ -1,7 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.aiplatform.v1beta1.schema.predict.params_v1beta1", - "protoPackage": "google.cloud.aiplatform.v1beta1.schema.predict.params", - "schema": "1.0" -} diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed deleted file mode 100644 index acdcd7bc60..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1beta1-schema-predict-params package uses inline types. 
diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/services/__init__.py deleted file mode 100644 index 4de65971c2..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py deleted file mode 100644 index 70a92bb59c..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/__init__.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -from .image_classification import ( - ImageClassificationPredictionParams, -) -from .image_object_detection import ( - ImageObjectDetectionPredictionParams, -) -from .image_segmentation import ( - ImageSegmentationPredictionParams, -) -from .video_action_recognition import ( - VideoActionRecognitionPredictionParams, -) -from .video_classification import ( - VideoClassificationPredictionParams, -) -from .video_object_tracking import ( - VideoObjectTrackingPredictionParams, -) - -__all__ = ( - 'ImageClassificationPredictionParams', - 'ImageObjectDetectionPredictionParams', - 'ImageSegmentationPredictionParams', - 'VideoActionRecognitionPredictionParams', - 'VideoClassificationPredictionParams', - 'VideoObjectTrackingPredictionParams', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py deleted file mode 100644 index 26247cda3b..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_classification.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.params', - manifest={ - 'ImageClassificationPredictionParams', - }, -) - - -class ImageClassificationPredictionParams(proto.Message): - r"""Prediction model parameters for Image Classification. - - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. Default value is - 0.0 - max_predictions (int): - The Model only returns up to that many top, - by confidence score, predictions per instance. - If this number is very high, the Model may - return fewer predictions. Default value is 10. - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py deleted file mode 100644 index 307c6e743c..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_object_detection.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.params', - manifest={ - 'ImageObjectDetectionPredictionParams', - }, -) - - -class ImageObjectDetectionPredictionParams(proto.Message): - r"""Prediction model parameters for Image Object Detection. - - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. Default value is - 0.0 - max_predictions (int): - The Model only returns up to that many top, - by confidence score, predictions per instance. - Note that number of returned predictions is also - limited by metadata's predictionsLimit. Default - value is 10. - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py deleted file mode 100644 index c60fd1a5a5..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/image_segmentation.py +++ /dev/null @@ -1,45 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.params', - manifest={ - 'ImageSegmentationPredictionParams', - }, -) - - -class ImageSegmentationPredictionParams(proto.Message): - r"""Prediction model parameters for Image Segmentation. - - Attributes: - confidence_threshold (float): - When the model predicts category of pixels of - the image, it will only provide predictions for - pixels that it is at least this much confident - about. All other pixels will be classified as - background. Default value is 0.5. - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py deleted file mode 100644 index b5ab67fd1e..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_action_recognition.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.params', - manifest={ - 'VideoActionRecognitionPredictionParams', - }, -) - - -class VideoActionRecognitionPredictionParams(proto.Message): - r"""Prediction model parameters for Video Action Recognition. - - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. Default value is - 0.0 - max_predictions (int): - The model only returns up to that many top, - by confidence score, predictions per frame of - the video. If this number is very high, the - Model may return fewer predictions per frame. - Default value is 50. - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py deleted file mode 100644 index 6f2f2d35c7..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_classification.py +++ /dev/null @@ -1,95 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.params', - manifest={ - 'VideoClassificationPredictionParams', - }, -) - - -class VideoClassificationPredictionParams(proto.Message): - r"""Prediction model parameters for Video Classification. - - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. Default value is - 0.0 - max_predictions (int): - The Model only returns up to that many top, - by confidence score, predictions per instance. - If this number is very high, the Model may - return fewer predictions. Default value is - 10,000. - segment_classification (bool): - Set to true to request segment-level - classification. Vertex AI returns labels and - their confidence scores for the entire time - segment of the video that user specified in the - input instance. Default value is true - shot_classification (bool): - Set to true to request shot-level - classification. Vertex AI determines the - boundaries for each camera shot in the entire - time segment of the video that user specified in - the input instance. Vertex AI then returns - labels and their confidence scores for each - detected shot, along with the start and end time - of the shot. - WARNING: Model evaluation is not done for this - classification type, the quality of it depends - on the training data, but there are no metrics - provided to describe that quality. - Default value is false - one_sec_interval_classification (bool): - Set to true to request classification for a - video at one-second intervals. Vertex AI returns - labels and their confidence scores for each - second of the entire time segment of the video - that user specified in the input WARNING: Model - evaluation is not done for this classification - type, the quality of it depends on the training - data, but there are no metrics provided to - describe that quality. 
Default value is false - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - segment_classification = proto.Field( - proto.BOOL, - number=3, - ) - shot_classification = proto.Field( - proto.BOOL, - number=4, - ) - one_sec_interval_classification = proto.Field( - proto.BOOL, - number=5, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py deleted file mode 100644 index 8888dca9d3..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/types/video_object_tracking.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.params', - manifest={ - 'VideoObjectTrackingPredictionParams', - }, -) - - -class VideoObjectTrackingPredictionParams(proto.Message): - r"""Prediction model parameters for Video Object Tracking. - - Attributes: - confidence_threshold (float): - The Model only returns predictions with at - least this confidence score. 
Default value is - 0.0 - max_predictions (int): - The model only returns up to that many top, - by confidence score, predictions per frame of - the video. If this number is very high, the - Model may return fewer predictions per frame. - Default value is 50. - min_bounding_box_size (float): - Only bounding boxes with shortest edge at - least that long as a relative value of video - frame size are returned. Default value is 0.0. - """ - - confidence_threshold = proto.Field( - proto.FLOAT, - number=1, - ) - max_predictions = proto.Field( - proto.INT32, - number=2, - ) - min_bounding_box_size = proto.Field( - proto.FLOAT, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py deleted file mode 100644 index 0b54451ca0..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - - -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.classification import ClassificationPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.image_object_detection import ImageObjectDetectionPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.image_segmentation import ImageSegmentationPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.tabular_classification import TabularClassificationPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.tabular_regression import TabularRegressionPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.text_extraction import TextExtractionPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.text_sentiment import TextSentimentPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.time_series_forecasting import TimeSeriesForecastingPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_action_recognition import VideoActionRecognitionPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_classification import VideoClassificationPredictionResult -from google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.video_object_tracking import VideoObjectTrackingPredictionResult - -__all__ = ('ClassificationPredictionResult', - 'ImageObjectDetectionPredictionResult', - 'ImageSegmentationPredictionResult', - 'TabularClassificationPredictionResult', - 'TabularRegressionPredictionResult', - 'TextExtractionPredictionResult', - 'TextSentimentPredictionResult', - 'TimeSeriesForecastingPredictionResult', - 'VideoActionRecognitionPredictionResult', - 'VideoClassificationPredictionResult', - 'VideoObjectTrackingPredictionResult', 
-) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed deleted file mode 100644 index 8cf97d7107..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1beta1-schema-predict-prediction package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py deleted file mode 100644 index 495759c24b..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/__init__.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - - -from .types.classification import ClassificationPredictionResult -from .types.image_object_detection import ImageObjectDetectionPredictionResult -from .types.image_segmentation import ImageSegmentationPredictionResult -from .types.tabular_classification import TabularClassificationPredictionResult -from .types.tabular_regression import TabularRegressionPredictionResult -from .types.text_extraction import TextExtractionPredictionResult -from .types.text_sentiment import TextSentimentPredictionResult -from .types.time_series_forecasting import TimeSeriesForecastingPredictionResult -from .types.video_action_recognition import VideoActionRecognitionPredictionResult -from .types.video_classification import VideoClassificationPredictionResult -from .types.video_object_tracking import VideoObjectTrackingPredictionResult - -__all__ = ( -'ClassificationPredictionResult', -'ImageObjectDetectionPredictionResult', -'ImageSegmentationPredictionResult', -'TabularClassificationPredictionResult', -'TabularRegressionPredictionResult', -'TextExtractionPredictionResult', -'TextSentimentPredictionResult', -'TimeSeriesForecastingPredictionResult', -'VideoActionRecognitionPredictionResult', -'VideoClassificationPredictionResult', -'VideoObjectTrackingPredictionResult', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_metadata.json deleted file mode 100644 index 99d3dc6402..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_metadata.json +++ /dev/null @@ -1,7 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1", - "protoPackage": 
"google.cloud.aiplatform.v1beta1.schema.predict.prediction", - "schema": "1.0" -} diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed deleted file mode 100644 index 8cf97d7107..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1beta1-schema-predict-prediction package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/services/__init__.py deleted file mode 100644 index 4de65971c2..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py deleted file mode 100644 index f3b70f66dd..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/__init__.py +++ /dev/null @@ -1,62 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .classification import ( - ClassificationPredictionResult, -) -from .image_object_detection import ( - ImageObjectDetectionPredictionResult, -) -from .image_segmentation import ( - ImageSegmentationPredictionResult, -) -from .tabular_classification import ( - TabularClassificationPredictionResult, -) -from .tabular_regression import ( - TabularRegressionPredictionResult, -) -from .text_extraction import ( - TextExtractionPredictionResult, -) -from .text_sentiment import ( - TextSentimentPredictionResult, -) -from .time_series_forecasting import ( - TimeSeriesForecastingPredictionResult, -) -from .video_action_recognition import ( - VideoActionRecognitionPredictionResult, -) -from .video_classification import ( - VideoClassificationPredictionResult, -) -from .video_object_tracking import ( - VideoObjectTrackingPredictionResult, -) - -__all__ = ( - 'ClassificationPredictionResult', - 'ImageObjectDetectionPredictionResult', - 'ImageSegmentationPredictionResult', - 'TabularClassificationPredictionResult', - 'TabularRegressionPredictionResult', - 'TextExtractionPredictionResult', - 'TextSentimentPredictionResult', - 'TimeSeriesForecastingPredictionResult', - 'VideoActionRecognitionPredictionResult', - 'VideoClassificationPredictionResult', - 'VideoObjectTrackingPredictionResult', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py deleted file mode 100644 index 80c12d2a0a..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/classification.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', - manifest={ - 'ClassificationPredictionResult', - }, -) - - -class ClassificationPredictionResult(proto.Message): - r"""Prediction output format for Image and Text Classification. - - Attributes: - ids (Sequence[int]): - The resource IDs of the AnnotationSpecs that - had been identified. - display_names (Sequence[str]): - The display names of the AnnotationSpecs that - had been identified, order matches the IDs. - confidences (Sequence[float]): - The Model's confidences in correctness of the - predicted IDs, higher value means higher - confidence. Order matches the Ids. 
- """ - - ids = proto.RepeatedField( - proto.INT64, - number=1, - ) - display_names = proto.RepeatedField( - proto.STRING, - number=2, - ) - confidences = proto.RepeatedField( - proto.FLOAT, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py deleted file mode 100644 index d8207792e8..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_object_detection.py +++ /dev/null @@ -1,73 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import struct_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', - manifest={ - 'ImageObjectDetectionPredictionResult', - }, -) - - -class ImageObjectDetectionPredictionResult(proto.Message): - r"""Prediction output format for Image Object Detection. - - Attributes: - ids (Sequence[int]): - The resource IDs of the AnnotationSpecs that - had been identified, ordered by the confidence - score descendingly. 
- display_names (Sequence[str]): - The display names of the AnnotationSpecs that - had been identified, order matches the IDs. - confidences (Sequence[float]): - The Model's confidences in correctness of the - predicted IDs, higher value means higher - confidence. Order matches the Ids. - bboxes (Sequence[google.protobuf.struct_pb2.ListValue]): - Bounding boxes, i.e. the rectangles over the image, that - pinpoint the found AnnotationSpecs. Given in order that - matches the IDs. Each bounding box is an array of 4 numbers - ``xMin``, ``xMax``, ``yMin``, and ``yMax``, which represent - the extremal coordinates of the box. They are relative to - the image size, and the point 0,0 is in the top left of the - image. - """ - - ids = proto.RepeatedField( - proto.INT64, - number=1, - ) - display_names = proto.RepeatedField( - proto.STRING, - number=2, - ) - confidences = proto.RepeatedField( - proto.FLOAT, - number=3, - ) - bboxes = proto.RepeatedField( - proto.MESSAGE, - number=4, - message=struct_pb2.ListValue, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py deleted file mode 100644 index df4e2907f9..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/image_segmentation.py +++ /dev/null @@ -1,62 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', - manifest={ - 'ImageSegmentationPredictionResult', - }, -) - - -class ImageSegmentationPredictionResult(proto.Message): - r"""Prediction output format for Image Segmentation. - - Attributes: - category_mask (str): - A PNG image where each pixel in the mask - represents the category in which the pixel in - the original image was predicted to belong to. - The size of this image will be the same as the - original image. The mapping between the - AnntoationSpec and the color can be found in - model's metadata. The model will choose the most - likely category and if none of the categories - reach the confidence threshold, the pixel will - be marked as background. - confidence_mask (str): - A one channel image which is encoded as an - 8bit lossless PNG. The size of the image will be - the same as the original image. For a specific - pixel, darker color means less confidence in - correctness of the cateogry in the categoryMask - for the corresponding pixel. Black means no - confidence and white means complete confidence. 
- """ - - category_mask = proto.Field( - proto.STRING, - number=1, - ) - confidence_mask = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py deleted file mode 100644 index df55dda685..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_classification.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', - manifest={ - 'TabularClassificationPredictionResult', - }, -) - - -class TabularClassificationPredictionResult(proto.Message): - r"""Prediction output format for Tabular Classification. - - Attributes: - classes (Sequence[str]): - The name of the classes being classified, - contains all possible values of the target - column. - scores (Sequence[float]): - The model's confidence in each class being - correct, higher value means higher confidence. - The N-th score corresponds to the N-th class in - classes. 
- """ - - classes = proto.RepeatedField( - proto.STRING, - number=1, - ) - scores = proto.RepeatedField( - proto.FLOAT, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py deleted file mode 100644 index fac8e1e6d5..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/tabular_regression.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', - manifest={ - 'TabularRegressionPredictionResult', - }, -) - - -class TabularRegressionPredictionResult(proto.Message): - r"""Prediction output format for Tabular Regression. - - Attributes: - value (float): - The regression value. - lower_bound (float): - The lower bound of the prediction interval. - upper_bound (float): - The upper bound of the prediction interval. 
- """ - - value = proto.Field( - proto.FLOAT, - number=1, - ) - lower_bound = proto.Field( - proto.FLOAT, - number=2, - ) - upper_bound = proto.Field( - proto.FLOAT, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py deleted file mode 100644 index b2a28d4c84..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_extraction.py +++ /dev/null @@ -1,78 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', - manifest={ - 'TextExtractionPredictionResult', - }, -) - - -class TextExtractionPredictionResult(proto.Message): - r"""Prediction output format for Text Extraction. - - Attributes: - ids (Sequence[int]): - The resource IDs of the AnnotationSpecs that - had been identified, ordered by the confidence - score descendingly. - display_names (Sequence[str]): - The display names of the AnnotationSpecs that - had been identified, order matches the IDs. 
- text_segment_start_offsets (Sequence[int]): - The start offsets, inclusive, of the text - segment in which the AnnotationSpec has been - identified. Expressed as a zero-based number of - characters as measured from the start of the - text snippet. - text_segment_end_offsets (Sequence[int]): - The end offsets, inclusive, of the text - segment in which the AnnotationSpec has been - identified. Expressed as a zero-based number of - characters as measured from the start of the - text snippet. - confidences (Sequence[float]): - The Model's confidences in correctness of the - predicted IDs, higher value means higher - confidence. Order matches the Ids. - """ - - ids = proto.RepeatedField( - proto.INT64, - number=1, - ) - display_names = proto.RepeatedField( - proto.STRING, - number=2, - ) - text_segment_start_offsets = proto.RepeatedField( - proto.INT64, - number=3, - ) - text_segment_end_offsets = proto.RepeatedField( - proto.INT64, - number=4, - ) - confidences = proto.RepeatedField( - proto.FLOAT, - number=5, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py deleted file mode 100644 index c041c2581c..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/text_sentiment.py +++ /dev/null @@ -1,48 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', - manifest={ - 'TextSentimentPredictionResult', - }, -) - - -class TextSentimentPredictionResult(proto.Message): - r"""Prediction output format for Text Sentiment - - Attributes: - sentiment (int): - The integer sentiment labels between 0 - (inclusive) and sentimentMax label (inclusive), - while 0 maps to the least positive sentiment and - sentimentMax maps to the most positive one. The - higher the score is, the more positive the - sentiment in the text snippet is. Note: - sentimentMax is an integer value between 1 - (inclusive) and 10 (inclusive). - """ - - sentiment = proto.Field( - proto.INT32, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py deleted file mode 100644 index 37c9354c69..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/time_series_forecasting.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', - manifest={ - 'TimeSeriesForecastingPredictionResult', - }, -) - - -class TimeSeriesForecastingPredictionResult(proto.Message): - r"""Prediction output format for Time Series Forecasting. - - Attributes: - value (float): - The regression value. - """ - - value = proto.Field( - proto.FLOAT, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py deleted file mode 100644 index 4682a1a58e..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_action_recognition.py +++ /dev/null @@ -1,85 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', - manifest={ - 'VideoActionRecognitionPredictionResult', - }, -) - - -class VideoActionRecognitionPredictionResult(proto.Message): - r"""Prediction output format for Video Action Recognition. - - Attributes: - id (str): - The resource ID of the AnnotationSpec that - had been identified. - display_name (str): - The display name of the AnnotationSpec that - had been identified. - time_segment_start (google.protobuf.duration_pb2.Duration): - The beginning, inclusive, of the video's time - segment in which the AnnotationSpec has been - identified. Expressed as a number of seconds as - measured from the start of the video, with - fractions up to a microsecond precision, and - with "s" appended at the end. - time_segment_end (google.protobuf.duration_pb2.Duration): - The end, exclusive, of the video's time - segment in which the AnnotationSpec has been - identified. Expressed as a number of seconds as - measured from the start of the video, with - fractions up to a microsecond precision, and - with "s" appended at the end. - confidence (google.protobuf.wrappers_pb2.FloatValue): - The Model's confidence in correction of this - prediction, higher value means higher - confidence. 
- """ - - id = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.MESSAGE, - number=4, - message=duration_pb2.Duration, - ) - time_segment_end = proto.Field( - proto.MESSAGE, - number=5, - message=duration_pb2.Duration, - ) - confidence = proto.Field( - proto.MESSAGE, - number=6, - message=wrappers_pb2.FloatValue, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py deleted file mode 100644 index 2767545e55..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_classification.py +++ /dev/null @@ -1,103 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', - manifest={ - 'VideoClassificationPredictionResult', - }, -) - - -class VideoClassificationPredictionResult(proto.Message): - r"""Prediction output format for Video Classification. 
- - Attributes: - id (str): - The resource ID of the AnnotationSpec that - had been identified. - display_name (str): - The display name of the AnnotationSpec that - had been identified. - type_ (str): - The type of the prediction. The requested - types can be configured via parameters. This - will be one of - segment-classification - - shot-classification - - one-sec-interval-classification - time_segment_start (google.protobuf.duration_pb2.Duration): - The beginning, inclusive, of the video's time - segment in which the AnnotationSpec has been - identified. Expressed as a number of seconds as - measured from the start of the video, with - fractions up to a microsecond precision, and - with "s" appended at the end. Note that for - 'segment-classification' prediction type, this - equals the original 'timeSegmentStart' from the - input instance, for other types it is the start - of a shot or a 1 second interval respectively. - time_segment_end (google.protobuf.duration_pb2.Duration): - The end, exclusive, of the video's time - segment in which the AnnotationSpec has been - identified. Expressed as a number of seconds as - measured from the start of the video, with - fractions up to a microsecond precision, and - with "s" appended at the end. Note that for - 'segment-classification' prediction type, this - equals the original 'timeSegmentEnd' from the - input instance, for other types it is the end of - a shot or a 1 second interval respectively. - confidence (google.protobuf.wrappers_pb2.FloatValue): - The Model's confidence in correction of this - prediction, higher value means higher - confidence. 
- """ - - id = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - type_ = proto.Field( - proto.STRING, - number=3, - ) - time_segment_start = proto.Field( - proto.MESSAGE, - number=4, - message=duration_pb2.Duration, - ) - time_segment_end = proto.Field( - proto.MESSAGE, - number=5, - message=duration_pb2.Duration, - ) - confidence = proto.Field( - proto.MESSAGE, - number=6, - message=wrappers_pb2.FloatValue, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py deleted file mode 100644 index cbbaece502..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/types/video_object_tracking.py +++ /dev/null @@ -1,145 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.predict.prediction', - manifest={ - 'VideoObjectTrackingPredictionResult', - }, -) - - -class VideoObjectTrackingPredictionResult(proto.Message): - r"""Prediction output format for Video Object Tracking. - - Attributes: - id (str): - The resource ID of the AnnotationSpec that - had been identified. - display_name (str): - The display name of the AnnotationSpec that - had been identified. - time_segment_start (google.protobuf.duration_pb2.Duration): - The beginning, inclusive, of the video's time - segment in which the object instance has been - detected. Expressed as a number of seconds as - measured from the start of the video, with - fractions up to a microsecond precision, and - with "s" appended at the end. - time_segment_end (google.protobuf.duration_pb2.Duration): - The end, inclusive, of the video's time - segment in which the object instance has been - detected. Expressed as a number of seconds as - measured from the start of the video, with - fractions up to a microsecond precision, and - with "s" appended at the end. - confidence (google.protobuf.wrappers_pb2.FloatValue): - The Model's confidence in correction of this - prediction, higher value means higher - confidence. - frames (Sequence[google.cloud.aiplatform.v1beta1.schema.predict.prediction_v1beta1.types.VideoObjectTrackingPredictionResult.Frame]): - All of the frames of the video in which a - single object instance has been detected. The - bounding boxes in the frames identify the same - object. - """ - - class Frame(proto.Message): - r"""The fields ``xMin``, ``xMax``, ``yMin``, and ``yMax`` refer to a - bounding box, i.e. the rectangle over the video frame pinpointing - the found AnnotationSpec. 
The coordinates are relative to the frame - size, and the point 0,0 is in the top left of the frame. - - Attributes: - time_offset (google.protobuf.duration_pb2.Duration): - A time (frame) of a video in which the object - has been detected. Expressed as a number of - seconds as measured from the start of the video, - with fractions up to a microsecond precision, - and with "s" appended at the end. - x_min (google.protobuf.wrappers_pb2.FloatValue): - The leftmost coordinate of the bounding box. - x_max (google.protobuf.wrappers_pb2.FloatValue): - The rightmost coordinate of the bounding box. - y_min (google.protobuf.wrappers_pb2.FloatValue): - The topmost coordinate of the bounding box. - y_max (google.protobuf.wrappers_pb2.FloatValue): - The bottommost coordinate of the bounding - box. - """ - - time_offset = proto.Field( - proto.MESSAGE, - number=1, - message=duration_pb2.Duration, - ) - x_min = proto.Field( - proto.MESSAGE, - number=2, - message=wrappers_pb2.FloatValue, - ) - x_max = proto.Field( - proto.MESSAGE, - number=3, - message=wrappers_pb2.FloatValue, - ) - y_min = proto.Field( - proto.MESSAGE, - number=4, - message=wrappers_pb2.FloatValue, - ) - y_max = proto.Field( - proto.MESSAGE, - number=5, - message=wrappers_pb2.FloatValue, - ) - - id = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - time_segment_start = proto.Field( - proto.MESSAGE, - number=3, - message=duration_pb2.Duration, - ) - time_segment_end = proto.Field( - proto.MESSAGE, - number=4, - message=duration_pb2.Duration, - ) - confidence = proto.Field( - proto.MESSAGE, - number=5, - message=wrappers_pb2.FloatValue, - ) - frames = proto.RepeatedField( - proto.MESSAGE, - number=6, - message=Frame, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py 
b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py deleted file mode 100644 index b31ea1309d..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/__init__.py +++ /dev/null @@ -1,75 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_classification import AutoMlImageClassification -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_classification import AutoMlImageClassificationInputs -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_classification import AutoMlImageClassificationMetadata -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_object_detection import AutoMlImageObjectDetection -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_object_detection import AutoMlImageObjectDetectionInputs -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_object_detection import AutoMlImageObjectDetectionMetadata -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_segmentation import AutoMlImageSegmentation -from 
google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_segmentation import AutoMlImageSegmentationInputs -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_image_segmentation import AutoMlImageSegmentationMetadata -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_tables import AutoMlTables -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_tables import AutoMlTablesInputs -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_tables import AutoMlTablesMetadata -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_classification import AutoMlTextClassification -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_classification import AutoMlTextClassificationInputs -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_extraction import AutoMlTextExtraction -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_extraction import AutoMlTextExtractionInputs -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_sentiment import AutoMlTextSentiment -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_text_sentiment import AutoMlTextSentimentInputs -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_time_series_forecasting import AutoMlForecasting -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_time_series_forecasting import AutoMlForecastingInputs -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_time_series_forecasting import AutoMlForecastingMetadata -from 
google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_action_recognition import AutoMlVideoActionRecognition -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_action_recognition import AutoMlVideoActionRecognitionInputs -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_classification import AutoMlVideoClassification -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_classification import AutoMlVideoClassificationInputs -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_object_tracking import AutoMlVideoObjectTracking -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.automl_video_object_tracking import AutoMlVideoObjectTrackingInputs -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig - -__all__ = ('AutoMlImageClassification', - 'AutoMlImageClassificationInputs', - 'AutoMlImageClassificationMetadata', - 'AutoMlImageObjectDetection', - 'AutoMlImageObjectDetectionInputs', - 'AutoMlImageObjectDetectionMetadata', - 'AutoMlImageSegmentation', - 'AutoMlImageSegmentationInputs', - 'AutoMlImageSegmentationMetadata', - 'AutoMlTables', - 'AutoMlTablesInputs', - 'AutoMlTablesMetadata', - 'AutoMlTextClassification', - 'AutoMlTextClassificationInputs', - 'AutoMlTextExtraction', - 'AutoMlTextExtractionInputs', - 'AutoMlTextSentiment', - 'AutoMlTextSentimentInputs', - 'AutoMlForecasting', - 'AutoMlForecastingInputs', - 'AutoMlForecastingMetadata', - 'AutoMlVideoActionRecognition', - 'AutoMlVideoActionRecognitionInputs', - 'AutoMlVideoClassification', - 'AutoMlVideoClassificationInputs', - 'AutoMlVideoObjectTracking', - 'AutoMlVideoObjectTrackingInputs', - 'ExportEvaluatedDataItemsConfig', -) diff --git 
a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/py.typed deleted file mode 100644 index 98af260cd7..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1beta1-schema-trainingjob-definition package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py deleted file mode 100644 index 83f5b0d33f..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/__init__.py +++ /dev/null @@ -1,76 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - - -from .types.automl_image_classification import AutoMlImageClassification -from .types.automl_image_classification import AutoMlImageClassificationInputs -from .types.automl_image_classification import AutoMlImageClassificationMetadata -from .types.automl_image_object_detection import AutoMlImageObjectDetection -from .types.automl_image_object_detection import AutoMlImageObjectDetectionInputs -from .types.automl_image_object_detection import AutoMlImageObjectDetectionMetadata -from .types.automl_image_segmentation import AutoMlImageSegmentation -from .types.automl_image_segmentation import AutoMlImageSegmentationInputs -from .types.automl_image_segmentation import AutoMlImageSegmentationMetadata -from .types.automl_tables import AutoMlTables -from .types.automl_tables import AutoMlTablesInputs -from .types.automl_tables import AutoMlTablesMetadata -from .types.automl_text_classification import AutoMlTextClassification -from .types.automl_text_classification import AutoMlTextClassificationInputs -from .types.automl_text_extraction import AutoMlTextExtraction -from .types.automl_text_extraction import AutoMlTextExtractionInputs -from .types.automl_text_sentiment import AutoMlTextSentiment -from .types.automl_text_sentiment import AutoMlTextSentimentInputs -from .types.automl_time_series_forecasting import AutoMlForecasting -from .types.automl_time_series_forecasting import AutoMlForecastingInputs -from .types.automl_time_series_forecasting import AutoMlForecastingMetadata -from .types.automl_video_action_recognition import AutoMlVideoActionRecognition -from .types.automl_video_action_recognition import AutoMlVideoActionRecognitionInputs -from .types.automl_video_classification import AutoMlVideoClassification -from .types.automl_video_classification import AutoMlVideoClassificationInputs -from .types.automl_video_object_tracking import AutoMlVideoObjectTracking -from .types.automl_video_object_tracking import AutoMlVideoObjectTrackingInputs -from 
.types.export_evaluated_data_items_config import ExportEvaluatedDataItemsConfig - -__all__ = ( -'AutoMlForecasting', -'AutoMlForecastingInputs', -'AutoMlForecastingMetadata', -'AutoMlImageClassification', -'AutoMlImageClassificationInputs', -'AutoMlImageClassificationMetadata', -'AutoMlImageObjectDetection', -'AutoMlImageObjectDetectionInputs', -'AutoMlImageObjectDetectionMetadata', -'AutoMlImageSegmentation', -'AutoMlImageSegmentationInputs', -'AutoMlImageSegmentationMetadata', -'AutoMlTables', -'AutoMlTablesInputs', -'AutoMlTablesMetadata', -'AutoMlTextClassification', -'AutoMlTextClassificationInputs', -'AutoMlTextExtraction', -'AutoMlTextExtractionInputs', -'AutoMlTextSentiment', -'AutoMlTextSentimentInputs', -'AutoMlVideoActionRecognition', -'AutoMlVideoActionRecognitionInputs', -'AutoMlVideoClassification', -'AutoMlVideoClassificationInputs', -'AutoMlVideoObjectTracking', -'AutoMlVideoObjectTrackingInputs', -'ExportEvaluatedDataItemsConfig', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_metadata.json deleted file mode 100644 index 6de794c90a..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_metadata.json +++ /dev/null @@ -1,7 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1", - "protoPackage": "google.cloud.aiplatform.v1beta1.schema.trainingjob.definition", - "schema": "1.0" -} diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/py.typed deleted file mode 100644 index 
98af260cd7..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform-v1beta1-schema-trainingjob-definition package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/services/__init__.py deleted file mode 100644 index 4de65971c2..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py deleted file mode 100644 index c120afb8d9..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/__init__.py +++ /dev/null @@ -1,98 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .automl_image_classification import ( - AutoMlImageClassification, - AutoMlImageClassificationInputs, - AutoMlImageClassificationMetadata, -) -from .automl_image_object_detection import ( - AutoMlImageObjectDetection, - AutoMlImageObjectDetectionInputs, - AutoMlImageObjectDetectionMetadata, -) -from .automl_image_segmentation import ( - AutoMlImageSegmentation, - AutoMlImageSegmentationInputs, - AutoMlImageSegmentationMetadata, -) -from .automl_tables import ( - AutoMlTables, - AutoMlTablesInputs, - AutoMlTablesMetadata, -) -from .automl_text_classification import ( - AutoMlTextClassification, - AutoMlTextClassificationInputs, -) -from .automl_text_extraction import ( - AutoMlTextExtraction, - AutoMlTextExtractionInputs, -) -from .automl_text_sentiment import ( - AutoMlTextSentiment, - AutoMlTextSentimentInputs, -) -from .automl_time_series_forecasting import ( - AutoMlForecasting, - AutoMlForecastingInputs, - AutoMlForecastingMetadata, -) -from .automl_video_action_recognition import ( - AutoMlVideoActionRecognition, - AutoMlVideoActionRecognitionInputs, -) -from .automl_video_classification import ( - AutoMlVideoClassification, - AutoMlVideoClassificationInputs, -) -from .automl_video_object_tracking import ( - AutoMlVideoObjectTracking, - AutoMlVideoObjectTrackingInputs, -) -from .export_evaluated_data_items_config import ( - ExportEvaluatedDataItemsConfig, -) - -__all__ = ( - 'AutoMlImageClassification', - 'AutoMlImageClassificationInputs', - 'AutoMlImageClassificationMetadata', - 'AutoMlImageObjectDetection', - 
'AutoMlImageObjectDetectionInputs', - 'AutoMlImageObjectDetectionMetadata', - 'AutoMlImageSegmentation', - 'AutoMlImageSegmentationInputs', - 'AutoMlImageSegmentationMetadata', - 'AutoMlTables', - 'AutoMlTablesInputs', - 'AutoMlTablesMetadata', - 'AutoMlTextClassification', - 'AutoMlTextClassificationInputs', - 'AutoMlTextExtraction', - 'AutoMlTextExtractionInputs', - 'AutoMlTextSentiment', - 'AutoMlTextSentimentInputs', - 'AutoMlForecasting', - 'AutoMlForecastingInputs', - 'AutoMlForecastingMetadata', - 'AutoMlVideoActionRecognition', - 'AutoMlVideoActionRecognitionInputs', - 'AutoMlVideoClassification', - 'AutoMlVideoClassificationInputs', - 'AutoMlVideoObjectTracking', - 'AutoMlVideoObjectTrackingInputs', - 'ExportEvaluatedDataItemsConfig', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py deleted file mode 100644 index d5ed62ed62..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py +++ /dev/null @@ -1,158 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', - manifest={ - 'AutoMlImageClassification', - 'AutoMlImageClassificationInputs', - 'AutoMlImageClassificationMetadata', - }, -) - - -class AutoMlImageClassification(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Image - Classification Model. - - Attributes: - inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageClassificationInputs): - The input parameters of this TrainingJob. - metadata (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageClassificationMetadata): - The metadata information. - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlImageClassificationInputs', - ) - metadata = proto.Field( - proto.MESSAGE, - number=2, - message='AutoMlImageClassificationMetadata', - ) - - -class AutoMlImageClassificationInputs(proto.Message): - r""" - - Attributes: - model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageClassificationInputs.ModelType): - - base_model_id (str): - The ID of the ``base`` model. If it is specified, the new - model will be trained based on the ``base`` model. - Otherwise, the new model will be trained from scratch. The - ``base`` model must be in the same Project and Location as - the new Model to train, and have the same modelType. - budget_milli_node_hours (int): - The training budget of creating this model, expressed in - milli node hours i.e. 1,000 value in this field means 1 node - hour. The actual metadata.costMilliNodeHours will be equal - or less than this value. If further model training ceases to - provide any improvements, it will stop without using the - full budget and the metadata.successfulStopReason will be - ``model-converged``. Note, node_hour = actual_hour \* - number_of_nodes_involved. 
For modelType - ``cloud``\ (default), the budget must be between 8,000 and - 800,000 milli node hours, inclusive. The default value is - 192,000 which represents one day in wall time, considering 8 - nodes are used. For model types ``mobile-tf-low-latency-1``, - ``mobile-tf-versatile-1``, ``mobile-tf-high-accuracy-1``, - the training budget must be between 1,000 and 100,000 milli - node hours, inclusive. The default value is 24,000 which - represents one day in wall time on a single node that is - used. - disable_early_stopping (bool): - Use the entire training budget. This disables - the early stopping feature. When false the early - stopping feature is enabled, which means that - AutoML Image Classification might stop training - before the entire training budget has been used. - multi_label (bool): - If false, a single-label (multi-class) Model - will be trained (i.e. assuming that for each - image just up to one annotation may be - applicable). If true, a multi-label Model will - be trained (i.e. assuming that for each image - multiple annotations may be applicable). - """ - class ModelType(proto.Enum): - r"""""" - MODEL_TYPE_UNSPECIFIED = 0 - CLOUD = 1 - MOBILE_TF_LOW_LATENCY_1 = 2 - MOBILE_TF_VERSATILE_1 = 3 - MOBILE_TF_HIGH_ACCURACY_1 = 4 - - model_type = proto.Field( - proto.ENUM, - number=1, - enum=ModelType, - ) - base_model_id = proto.Field( - proto.STRING, - number=2, - ) - budget_milli_node_hours = proto.Field( - proto.INT64, - number=3, - ) - disable_early_stopping = proto.Field( - proto.BOOL, - number=4, - ) - multi_label = proto.Field( - proto.BOOL, - number=5, - ) - - -class AutoMlImageClassificationMetadata(proto.Message): - r""" - - Attributes: - cost_milli_node_hours (int): - The actual training cost of creating this - model, expressed in milli node hours, i.e. 1,000 - value in this field means 1 node hour. - Guaranteed to not exceed - inputs.budgetMilliNodeHours. 
- successful_stop_reason (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageClassificationMetadata.SuccessfulStopReason): - For successful job completions, this is the - reason why the job has finished. - """ - class SuccessfulStopReason(proto.Enum): - r"""""" - SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 - BUDGET_REACHED = 1 - MODEL_CONVERGED = 2 - - cost_milli_node_hours = proto.Field( - proto.INT64, - number=1, - ) - successful_stop_reason = proto.Field( - proto.ENUM, - number=2, - enum=SuccessfulStopReason, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py deleted file mode 100644 index be87e59422..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_object_detection.py +++ /dev/null @@ -1,139 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', - manifest={ - 'AutoMlImageObjectDetection', - 'AutoMlImageObjectDetectionInputs', - 'AutoMlImageObjectDetectionMetadata', - }, -) - - -class AutoMlImageObjectDetection(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Image Object - Detection Model. - - Attributes: - inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageObjectDetectionInputs): - The input parameters of this TrainingJob. - metadata (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageObjectDetectionMetadata): - The metadata information - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlImageObjectDetectionInputs', - ) - metadata = proto.Field( - proto.MESSAGE, - number=2, - message='AutoMlImageObjectDetectionMetadata', - ) - - -class AutoMlImageObjectDetectionInputs(proto.Message): - r""" - - Attributes: - model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageObjectDetectionInputs.ModelType): - - budget_milli_node_hours (int): - The training budget of creating this model, expressed in - milli node hours i.e. 1,000 value in this field means 1 node - hour. The actual metadata.costMilliNodeHours will be equal - or less than this value. If further model training ceases to - provide any improvements, it will stop without using the - full budget and the metadata.successfulStopReason will be - ``model-converged``. Note, node_hour = actual_hour \* - number_of_nodes_involved. For modelType - ``cloud``\ (default), the budget must be between 20,000 and - 900,000 milli node hours, inclusive. The default value is - 216,000 which represents one day in wall time, considering 9 - nodes are used. 
For model types ``mobile-tf-low-latency-1``, - ``mobile-tf-versatile-1``, ``mobile-tf-high-accuracy-1`` the - training budget must be between 1,000 and 100,000 milli node - hours, inclusive. The default value is 24,000 which - represents one day in wall time on a single node that is - used. - disable_early_stopping (bool): - Use the entire training budget. This disables - the early stopping feature. When false the early - stopping feature is enabled, which means that - AutoML Image Object Detection might stop - training before the entire training budget has - been used. - """ - class ModelType(proto.Enum): - r"""""" - MODEL_TYPE_UNSPECIFIED = 0 - CLOUD_HIGH_ACCURACY_1 = 1 - CLOUD_LOW_LATENCY_1 = 2 - MOBILE_TF_LOW_LATENCY_1 = 3 - MOBILE_TF_VERSATILE_1 = 4 - MOBILE_TF_HIGH_ACCURACY_1 = 5 - - model_type = proto.Field( - proto.ENUM, - number=1, - enum=ModelType, - ) - budget_milli_node_hours = proto.Field( - proto.INT64, - number=2, - ) - disable_early_stopping = proto.Field( - proto.BOOL, - number=3, - ) - - -class AutoMlImageObjectDetectionMetadata(proto.Message): - r""" - - Attributes: - cost_milli_node_hours (int): - The actual training cost of creating this - model, expressed in milli node hours, i.e. 1,000 - value in this field means 1 node hour. - Guaranteed to not exceed - inputs.budgetMilliNodeHours. - successful_stop_reason (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageObjectDetectionMetadata.SuccessfulStopReason): - For successful job completions, this is the - reason why the job has finished. 
- """ - class SuccessfulStopReason(proto.Enum): - r"""""" - SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 - BUDGET_REACHED = 1 - MODEL_CONVERGED = 2 - - cost_milli_node_hours = proto.Field( - proto.INT64, - number=1, - ) - successful_stop_reason = proto.Field( - proto.ENUM, - number=2, - enum=SuccessfulStopReason, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py deleted file mode 100644 index ecde4f1605..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_segmentation.py +++ /dev/null @@ -1,133 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', - manifest={ - 'AutoMlImageSegmentation', - 'AutoMlImageSegmentationInputs', - 'AutoMlImageSegmentationMetadata', - }, -) - - -class AutoMlImageSegmentation(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Image - Segmentation Model. 
- - Attributes: - inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageSegmentationInputs): - The input parameters of this TrainingJob. - metadata (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageSegmentationMetadata): - The metadata information. - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlImageSegmentationInputs', - ) - metadata = proto.Field( - proto.MESSAGE, - number=2, - message='AutoMlImageSegmentationMetadata', - ) - - -class AutoMlImageSegmentationInputs(proto.Message): - r""" - - Attributes: - model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageSegmentationInputs.ModelType): - - budget_milli_node_hours (int): - The training budget of creating this model, expressed in - milli node hours i.e. 1,000 value in this field means 1 node - hour. The actual metadata.costMilliNodeHours will be equal - or less than this value. If further model training ceases to - provide any improvements, it will stop without using the - full budget and the metadata.successfulStopReason will be - ``model-converged``. Note, node_hour = actual_hour \* - number_of_nodes_involved. Or actaul_wall_clock_hours = - train_budget_milli_node_hours / (number_of_nodes_involved \* - 1000) For modelType ``cloud-high-accuracy-1``\ (default), - the budget must be between 20,000 and 2,000,000 milli node - hours, inclusive. The default value is 192,000 which - represents one day in wall time (1000 milli \* 24 hours \* 8 - nodes). - base_model_id (str): - The ID of the ``base`` model. If it is specified, the new - model will be trained based on the ``base`` model. - Otherwise, the new model will be trained from scratch. The - ``base`` model must be in the same Project and Location as - the new Model to train, and have the same modelType. 
- """ - class ModelType(proto.Enum): - r"""""" - MODEL_TYPE_UNSPECIFIED = 0 - CLOUD_HIGH_ACCURACY_1 = 1 - CLOUD_LOW_ACCURACY_1 = 2 - MOBILE_TF_LOW_LATENCY_1 = 3 - - model_type = proto.Field( - proto.ENUM, - number=1, - enum=ModelType, - ) - budget_milli_node_hours = proto.Field( - proto.INT64, - number=2, - ) - base_model_id = proto.Field( - proto.STRING, - number=3, - ) - - -class AutoMlImageSegmentationMetadata(proto.Message): - r""" - - Attributes: - cost_milli_node_hours (int): - The actual training cost of creating this - model, expressed in milli node hours, i.e. 1,000 - value in this field means 1 node hour. - Guaranteed to not exceed - inputs.budgetMilliNodeHours. - successful_stop_reason (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlImageSegmentationMetadata.SuccessfulStopReason): - For successful job completions, this is the - reason why the job has finished. - """ - class SuccessfulStopReason(proto.Enum): - r"""""" - SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0 - BUDGET_REACHED = 1 - MODEL_CONVERGED = 2 - - cost_milli_node_hours = proto.Field( - proto.INT64, - number=1, - ) - successful_stop_reason = proto.Field( - proto.ENUM, - number=2, - enum=SuccessfulStopReason, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py deleted file mode 100644 index 1c9e6b847b..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_tables.py +++ /dev/null @@ -1,529 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types import export_evaluated_data_items_config as gcastd_export_evaluated_data_items_config - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', - manifest={ - 'AutoMlTables', - 'AutoMlTablesInputs', - 'AutoMlTablesMetadata', - }, -) - - -class AutoMlTables(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Tables Model. - - Attributes: - inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs): - The input parameters of this TrainingJob. - metadata (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesMetadata): - The metadata information. - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlTablesInputs', - ) - metadata = proto.Field( - proto.MESSAGE, - number=2, - message='AutoMlTablesMetadata', - ) - - -class AutoMlTablesInputs(proto.Message): - r""" - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - optimization_objective_recall_value (float): - Required when optimization_objective is - "maximize-precision-at-recall". Must be between 0 and 1, - inclusive. 
- - This field is a member of `oneof`_ ``additional_optimization_objective_config``. - optimization_objective_precision_value (float): - Required when optimization_objective is - "maximize-recall-at-precision". Must be between 0 and 1, - inclusive. - - This field is a member of `oneof`_ ``additional_optimization_objective_config``. - prediction_type (str): - The type of prediction the Model is to - produce. "classification" - Predict one out of - multiple target values is - picked for each row. - "regression" - Predict a value based on its - relation to other values. This - type is available only to columns that contain - semantically numeric values, i.e. integers or - floating point number, even if - stored as e.g. strings. - target_column (str): - The column name of the target column that the - model is to predict. - transformations (Sequence[google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation]): - Each transformation will apply transform - function to given input column. And the result - will be used for training. When creating - transformation for BigQuery Struct column, the - column should be flattened using "." as the - delimiter. - optimization_objective (str): - Objective function the model is optimizing - towards. The training process creates a model - that maximizes/minimizes the value of the - objective function over the validation set. - - The supported optimization objectives depend on - the prediction type. If the field is not set, a - default objective function is used. - classification (binary): - "maximize-au-roc" (default) - Maximize the - area under the receiver - operating characteristic (ROC) curve. - "minimize-log-loss" - Minimize log loss. - "maximize-au-prc" - Maximize the area under - the precision-recall curve. "maximize- - precision-at-recall" - Maximize precision for a - specified - recall value. "maximize-recall-at-precision" - - Maximize recall for a specified - precision value. 
- classification (multi-class): - "minimize-log-loss" (default) - Minimize log - loss. - regression: - "minimize-rmse" (default) - Minimize root- - mean-squared error (RMSE). "minimize-mae" - - Minimize mean-absolute error (MAE). "minimize- - rmsle" - Minimize root-mean-squared log error - (RMSLE). - train_budget_milli_node_hours (int): - Required. The train budget of creating this - model, expressed in milli node hours i.e. 1,000 - value in this field means 1 node hour. - The training cost of the model will not exceed - this budget. The final cost will be attempted to - be close to the budget, though may end up being - (even) noticeably smaller - at the backend's - discretion. This especially may happen when - further model training ceases to provide any - improvements. - If the budget is set to a value known to be - insufficient to train a model for the given - dataset, the training won't be attempted and - will error. - - The train budget must be between 1,000 and - 72,000 milli node hours, inclusive. - disable_early_stopping (bool): - Use the entire training budget. This disables - the early stopping feature. By default, the - early stopping feature is enabled, which means - that AutoML Tables might stop training before - the entire training budget has been used. - weight_column_name (str): - Column name that should be used as the weight - column. Higher values in this column give more - importance to the row during model training. The - column must have numeric values between 0 and - 10000 inclusively; 0 means the row is ignored - for training. If weight column field is not set, - then all rows are assumed to have equal weight - of 1. - export_evaluated_data_items_config (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.ExportEvaluatedDataItemsConfig): - Configuration for exporting test set - predictions to a BigQuery table. If this - configuration is absent, then the export is not - performed. 
- additional_experiments (Sequence[str]): - Additional experiment flags for the Tables - training pipeline. - """ - - class Transformation(proto.Message): - r""" - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - auto (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.AutoTransformation): - - This field is a member of `oneof`_ ``transformation_detail``. - numeric (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.NumericTransformation): - - This field is a member of `oneof`_ ``transformation_detail``. - categorical (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.CategoricalTransformation): - - This field is a member of `oneof`_ ``transformation_detail``. - timestamp (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.TimestampTransformation): - - This field is a member of `oneof`_ ``transformation_detail``. - text (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.TextTransformation): - - This field is a member of `oneof`_ ``transformation_detail``. - repeated_numeric (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.NumericArrayTransformation): - - This field is a member of `oneof`_ ``transformation_detail``. 
- repeated_categorical (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.CategoricalArrayTransformation): - - This field is a member of `oneof`_ ``transformation_detail``. - repeated_text (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTablesInputs.Transformation.TextArrayTransformation): - - This field is a member of `oneof`_ ``transformation_detail``. - """ - - class AutoTransformation(proto.Message): - r"""Training pipeline will infer the proper transformation based - on the statistic of dataset. - - Attributes: - column_name (str): - - """ - - column_name = proto.Field( - proto.STRING, - number=1, - ) - - class NumericTransformation(proto.Message): - r"""Training pipeline will perform following transformation functions. - - - The value converted to float32. - - The z_score of the value. - - log(value+1) when the value is greater than or equal to 0. - Otherwise, this transformation is not applied and the value is - considered a missing value. - - z_score of log(value+1) when the value is greater than or equal - to 0. Otherwise, this transformation is not applied and the value - is considered a missing value. - - A boolean value that indicates whether the value is valid. - - Attributes: - column_name (str): - - invalid_values_allowed (bool): - If invalid values is allowed, the training - pipeline will create a boolean feature that - indicated whether the value is valid. Otherwise, - the training pipeline will discard the input row - from trainining data. - """ - - column_name = proto.Field( - proto.STRING, - number=1, - ) - invalid_values_allowed = proto.Field( - proto.BOOL, - number=2, - ) - - class CategoricalTransformation(proto.Message): - r"""Training pipeline will perform following transformation functions. - - - The categorical string as is--no change to case, punctuation, - spelling, tense, and so on. 
- - Convert the category name to a dictionary lookup index and - generate an embedding for each index. - - Categories that appear less than 5 times in the training dataset - are treated as the "unknown" category. The "unknown" category - gets its own special lookup index and resulting embedding. - - Attributes: - column_name (str): - - """ - - column_name = proto.Field( - proto.STRING, - number=1, - ) - - class TimestampTransformation(proto.Message): - r"""Training pipeline will perform following transformation functions. - - - Apply the transformation functions for Numerical columns. - - Determine the year, month, day,and weekday. Treat each value from - the - - timestamp as a Categorical column. - - Invalid numerical values (for example, values that fall outside - of a typical timestamp range, or are extreme values) receive no - special treatment and are not removed. - - Attributes: - column_name (str): - - time_format (str): - The format in which that time field is expressed. The - time_format must either be one of: - - - ``unix-seconds`` - - ``unix-milliseconds`` - - ``unix-microseconds`` - - ``unix-nanoseconds`` (for respectively number of seconds, - milliseconds, microseconds and nanoseconds since start of - the Unix epoch); or be written in ``strftime`` syntax. If - time_format is not set, then the default format is RFC - 3339 ``date-time`` format, where ``time-offset`` = - ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z) - invalid_values_allowed (bool): - If invalid values is allowed, the training - pipeline will create a boolean feature that - indicated whether the value is valid. Otherwise, - the training pipeline will discard the input row - from trainining data. 
- """ - - column_name = proto.Field( - proto.STRING, - number=1, - ) - time_format = proto.Field( - proto.STRING, - number=2, - ) - invalid_values_allowed = proto.Field( - proto.BOOL, - number=3, - ) - - class TextTransformation(proto.Message): - r"""Training pipeline will perform following transformation functions. - - - The text as is--no change to case, punctuation, spelling, tense, - and so on. - - Tokenize text to words. Convert each words to a dictionary lookup - index and generate an embedding for each index. Combine the - embedding of all elements into a single embedding using the mean. - - Tokenization is based on unicode script boundaries. - - Missing values get their own lookup index and resulting - embedding. - - Stop-words receive no special treatment and are not removed. - - Attributes: - column_name (str): - - """ - - column_name = proto.Field( - proto.STRING, - number=1, - ) - - class NumericArrayTransformation(proto.Message): - r"""Treats the column as numerical array and performs following - transformation functions. - - - All transformations for Numerical types applied to the average of - the all elements. - - The average of empty arrays is treated as zero. - - Attributes: - column_name (str): - - invalid_values_allowed (bool): - If invalid values is allowed, the training - pipeline will create a boolean feature that - indicated whether the value is valid. Otherwise, - the training pipeline will discard the input row - from trainining data. - """ - - column_name = proto.Field( - proto.STRING, - number=1, - ) - invalid_values_allowed = proto.Field( - proto.BOOL, - number=2, - ) - - class CategoricalArrayTransformation(proto.Message): - r"""Treats the column as categorical array and performs following - transformation functions. - - - For each element in the array, convert the category name to a - dictionary lookup index and generate an embedding for each index. - Combine the embedding of all elements into a single embedding - using the mean. 
- - Empty arrays treated as an embedding of zeroes. - - Attributes: - column_name (str): - - """ - - column_name = proto.Field( - proto.STRING, - number=1, - ) - - class TextArrayTransformation(proto.Message): - r"""Treats the column as text array and performs following - transformation functions. - - - Concatenate all text values in the array into a single text value - using a space (" ") as a delimiter, and then treat the result as - a single text value. Apply the transformations for Text columns. - - Empty arrays treated as an empty text. - - Attributes: - column_name (str): - - """ - - column_name = proto.Field( - proto.STRING, - number=1, - ) - - auto = proto.Field( - proto.MESSAGE, - number=1, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.AutoTransformation', - ) - numeric = proto.Field( - proto.MESSAGE, - number=2, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.NumericTransformation', - ) - categorical = proto.Field( - proto.MESSAGE, - number=3, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.CategoricalTransformation', - ) - timestamp = proto.Field( - proto.MESSAGE, - number=4, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.TimestampTransformation', - ) - text = proto.Field( - proto.MESSAGE, - number=5, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.TextTransformation', - ) - repeated_numeric = proto.Field( - proto.MESSAGE, - number=6, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.NumericArrayTransformation', - ) - repeated_categorical = proto.Field( - proto.MESSAGE, - number=7, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.CategoricalArrayTransformation', - ) - repeated_text = proto.Field( - proto.MESSAGE, - number=8, - oneof='transformation_detail', - message='AutoMlTablesInputs.Transformation.TextArrayTransformation', - ) - - 
optimization_objective_recall_value = proto.Field( - proto.FLOAT, - number=5, - oneof='additional_optimization_objective_config', - ) - optimization_objective_precision_value = proto.Field( - proto.FLOAT, - number=6, - oneof='additional_optimization_objective_config', - ) - prediction_type = proto.Field( - proto.STRING, - number=1, - ) - target_column = proto.Field( - proto.STRING, - number=2, - ) - transformations = proto.RepeatedField( - proto.MESSAGE, - number=3, - message=Transformation, - ) - optimization_objective = proto.Field( - proto.STRING, - number=4, - ) - train_budget_milli_node_hours = proto.Field( - proto.INT64, - number=7, - ) - disable_early_stopping = proto.Field( - proto.BOOL, - number=8, - ) - weight_column_name = proto.Field( - proto.STRING, - number=9, - ) - export_evaluated_data_items_config = proto.Field( - proto.MESSAGE, - number=10, - message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig, - ) - additional_experiments = proto.RepeatedField( - proto.STRING, - number=11, - ) - - -class AutoMlTablesMetadata(proto.Message): - r"""Model metadata specific to AutoML Tables. - - Attributes: - train_cost_milli_node_hours (int): - Output only. The actual training cost of the - model, expressed in milli node hours, i.e. 1,000 - value in this field means 1 node hour. - Guaranteed to not exceed the train budget. 
- """ - - train_cost_milli_node_hours = proto.Field( - proto.INT64, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py deleted file mode 100644 index 18b3d03692..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_classification.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', - manifest={ - 'AutoMlTextClassification', - 'AutoMlTextClassificationInputs', - }, -) - - -class AutoMlTextClassification(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Text - Classification Model. - - Attributes: - inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTextClassificationInputs): - The input parameters of this TrainingJob. 
- """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlTextClassificationInputs', - ) - - -class AutoMlTextClassificationInputs(proto.Message): - r""" - - Attributes: - multi_label (bool): - - """ - - multi_label = proto.Field( - proto.BOOL, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py deleted file mode 100644 index 371b261d5c..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_extraction.py +++ /dev/null @@ -1,49 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', - manifest={ - 'AutoMlTextExtraction', - 'AutoMlTextExtractionInputs', - }, -) - - -class AutoMlTextExtraction(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Text - Extraction Model. - - Attributes: - inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTextExtractionInputs): - The input parameters of this TrainingJob. 
- """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlTextExtractionInputs', - ) - - -class AutoMlTextExtractionInputs(proto.Message): - r""" - """ - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py deleted file mode 100644 index ff5eb97808..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_text_sentiment.py +++ /dev/null @@ -1,67 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', - manifest={ - 'AutoMlTextSentiment', - 'AutoMlTextSentimentInputs', - }, -) - - -class AutoMlTextSentiment(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Text - Sentiment Model. - - Attributes: - inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlTextSentimentInputs): - The input parameters of this TrainingJob. 
- """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlTextSentimentInputs', - ) - - -class AutoMlTextSentimentInputs(proto.Message): - r""" - - Attributes: - sentiment_max (int): - A sentiment is expressed as an integer - ordinal, where higher value means a more - positive sentiment. The range of sentiments that - will be used is between 0 and sentimentMax - (inclusive on both ends), and all the values in - the range must be represented in the dataset - before a model can be created. - Only the Annotations with this sentimentMax will - be used for training. sentimentMax value must be - between 1 and 10 (inclusive). - """ - - sentiment_max = proto.Field( - proto.INT32, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py deleted file mode 100644 index eb25865728..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_time_series_forecasting.py +++ /dev/null @@ -1,493 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types import export_evaluated_data_items_config as gcastd_export_evaluated_data_items_config - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', - manifest={ - 'AutoMlForecasting', - 'AutoMlForecastingInputs', - 'AutoMlForecastingMetadata', - }, -) - - -class AutoMlForecasting(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Forecasting - Model. - - Attributes: - inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs): - The input parameters of this TrainingJob. - metadata (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingMetadata): - The metadata information. - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlForecastingInputs', - ) - metadata = proto.Field( - proto.MESSAGE, - number=2, - message='AutoMlForecastingMetadata', - ) - - -class AutoMlForecastingInputs(proto.Message): - r""" - - Attributes: - target_column (str): - The name of the column that the model is to - predict. - time_series_identifier_column (str): - The name of the column that identifies the - time series. - time_column (str): - The name of the column that identifies time - order in the time series. - transformations (Sequence[google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation]): - Each transformation will apply transform - function to given input column. And the result - will be used for training. When creating - transformation for BigQuery Struct column, the - column should be flattened using "." as the - delimiter. - optimization_objective (str): - Objective function the model is optimizing towards. The - training process creates a model that optimizes the value of - the objective function over the validation set. 
- - The supported optimization objectives: - - - "minimize-rmse" (default) - Minimize root-mean-squared - error (RMSE). - - - "minimize-mae" - Minimize mean-absolute error (MAE). - - - "minimize-rmsle" - Minimize root-mean-squared log error - (RMSLE). - - - "minimize-rmspe" - Minimize root-mean-squared percentage - error (RMSPE). - - - "minimize-wape-mae" - Minimize the combination of - weighted absolute percentage error (WAPE) and - mean-absolute-error (MAE). - - - "minimize-quantile-loss" - Minimize the quantile loss at - the quantiles defined in ``quantiles``. - train_budget_milli_node_hours (int): - Required. The train budget of creating this - model, expressed in milli node hours i.e. 1,000 - value in this field means 1 node hour. - The training cost of the model will not exceed - this budget. The final cost will be attempted to - be close to the budget, though may end up being - (even) noticeably smaller - at the backend's - discretion. This especially may happen when - further model training ceases to provide any - improvements. - If the budget is set to a value known to be - insufficient to train a model for the given - dataset, the training won't be attempted and - will error. - - The train budget must be between 1,000 and - 72,000 milli node hours, inclusive. - weight_column (str): - Column name that should be used as the weight - column. Higher values in this column give more - importance to the row during model training. The - column must have numeric values between 0 and - 10000 inclusively; 0 means the row is ignored - for training. If weight column field is not set, - then all rows are assumed to have equal weight - of 1. - time_series_attribute_columns (Sequence[str]): - Column names that should be used as attribute - columns. The value of these columns does not - vary as a function of time. For example, store - ID or item color. 
- unavailable_at_forecast_columns (Sequence[str]): - Names of columns that are unavailable when a forecast is - requested. This column contains information for the given - entity (identified by the time_series_identifier_column) - that is unknown before the forecast For example, actual - weather on a given day. - available_at_forecast_columns (Sequence[str]): - Names of columns that are available and provided when a - forecast is requested. These columns contain information for - the given entity (identified by the - time_series_identifier_column column) that is known at - forecast. For example, predicted weather for a specific day. - data_granularity (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Granularity): - Expected difference in time granularity - between rows in the data. - forecast_horizon (int): - The amount of time into the future for which forecasted - values for the target are returned. Expressed in number of - units defined by the ``data_granularity`` field. - context_window (int): - The amount of time into the past training and prediction - data is used for model training and prediction respectively. - Expressed in number of units defined by the - ``data_granularity`` field. - export_evaluated_data_items_config (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.ExportEvaluatedDataItemsConfig): - Configuration for exporting test set - predictions to a BigQuery table. If this - configuration is absent, then the export is not - performed. - quantiles (Sequence[float]): - Quantiles to use for minimize-quantile-loss - ``optimization_objective``. Up to 5 quantiles are allowed of - values between 0 and 1, exclusive. Required if the value of - optimization_objective is minimize-quantile-loss. Represents - the percent quantiles to use for that objective. Quantiles - must be unique. - validation_options (str): - Validation options for the data validation component. 
The - available options are: - - - "fail-pipeline" - default, will validate against the - validation and fail the pipeline if it fails. - - - "ignore-validation" - ignore the results of the - validation and continue - additional_experiments (Sequence[str]): - Additional experiment flags for the time - series forcasting training. - """ - - class Transformation(proto.Message): - r""" - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - auto (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.AutoTransformation): - - This field is a member of `oneof`_ ``transformation_detail``. - numeric (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.NumericTransformation): - - This field is a member of `oneof`_ ``transformation_detail``. - categorical (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.CategoricalTransformation): - - This field is a member of `oneof`_ ``transformation_detail``. - timestamp (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.TimestampTransformation): - - This field is a member of `oneof`_ ``transformation_detail``. - text (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlForecastingInputs.Transformation.TextTransformation): - - This field is a member of `oneof`_ ``transformation_detail``. - """ - - class AutoTransformation(proto.Message): - r"""Training pipeline will infer the proper transformation based - on the statistic of dataset. 
- - Attributes: - column_name (str): - - """ - - column_name = proto.Field( - proto.STRING, - number=1, - ) - - class NumericTransformation(proto.Message): - r"""Training pipeline will perform following transformation functions. - - - The value converted to float32. - - - The z_score of the value. - - - log(value+1) when the value is greater than or equal to 0. - Otherwise, this transformation is not applied and the value is - considered a missing value. - - - z_score of log(value+1) when the value is greater than or equal - to 0. Otherwise, this transformation is not applied and the value - is considered a missing value. - - - A boolean value that indicates whether the value is valid. - - Attributes: - column_name (str): - - """ - - column_name = proto.Field( - proto.STRING, - number=1, - ) - - class CategoricalTransformation(proto.Message): - r"""Training pipeline will perform following transformation functions. - - - The categorical string as is--no change to case, punctuation, - spelling, tense, and so on. - - - Convert the category name to a dictionary lookup index and - generate an embedding for each index. - - - Categories that appear less than 5 times in the training dataset - are treated as the "unknown" category. The "unknown" category - gets its own special lookup index and resulting embedding. - - Attributes: - column_name (str): - - """ - - column_name = proto.Field( - proto.STRING, - number=1, - ) - - class TimestampTransformation(proto.Message): - r"""Training pipeline will perform following transformation functions. - - - Apply the transformation functions for Numerical columns. - - - Determine the year, month, day,and weekday. Treat each value from - the timestamp as a Categorical column. - - - Invalid numerical values (for example, values that fall outside - of a typical timestamp range, or are extreme values) receive no - special treatment and are not removed. 
- - Attributes: - column_name (str): - - time_format (str): - The format in which that time field is expressed. The - time_format must either be one of: - - - ``unix-seconds`` - - - ``unix-milliseconds`` - - - ``unix-microseconds`` - - - ``unix-nanoseconds`` - - (for respectively number of seconds, milliseconds, - microseconds and nanoseconds since start of the Unix epoch); - - or be written in ``strftime`` syntax. - - If time_format is not set, then the default format is RFC - 3339 ``date-time`` format, where ``time-offset`` = ``"Z"`` - (e.g. 1985-04-12T23:20:50.52Z) - """ - - column_name = proto.Field( - proto.STRING, - number=1, - ) - time_format = proto.Field( - proto.STRING, - number=2, - ) - - class TextTransformation(proto.Message): - r"""Training pipeline will perform following transformation functions. - - - The text as is--no change to case, punctuation, spelling, tense, - and so on. - - - Convert the category name to a dictionary lookup index and - generate an embedding for each index. 
- - Attributes: - column_name (str): - - """ - - column_name = proto.Field( - proto.STRING, - number=1, - ) - - auto = proto.Field( - proto.MESSAGE, - number=1, - oneof='transformation_detail', - message='AutoMlForecastingInputs.Transformation.AutoTransformation', - ) - numeric = proto.Field( - proto.MESSAGE, - number=2, - oneof='transformation_detail', - message='AutoMlForecastingInputs.Transformation.NumericTransformation', - ) - categorical = proto.Field( - proto.MESSAGE, - number=3, - oneof='transformation_detail', - message='AutoMlForecastingInputs.Transformation.CategoricalTransformation', - ) - timestamp = proto.Field( - proto.MESSAGE, - number=4, - oneof='transformation_detail', - message='AutoMlForecastingInputs.Transformation.TimestampTransformation', - ) - text = proto.Field( - proto.MESSAGE, - number=5, - oneof='transformation_detail', - message='AutoMlForecastingInputs.Transformation.TextTransformation', - ) - - class Granularity(proto.Message): - r"""A duration of time expressed in time granularity units. - - Attributes: - unit (str): - The time granularity unit of this time period. The supported - units are: - - - "minute" - - - "hour" - - - "day" - - - "week" - - - "month" - - - "year". - quantity (int): - The number of granularity_units between data points in the - training data. If ``granularity_unit`` is ``minute``, can be - 1, 5, 10, 15, or 30. For all other values of - ``granularity_unit``, must be 1. 
- """ - - unit = proto.Field( - proto.STRING, - number=1, - ) - quantity = proto.Field( - proto.INT64, - number=2, - ) - - target_column = proto.Field( - proto.STRING, - number=1, - ) - time_series_identifier_column = proto.Field( - proto.STRING, - number=2, - ) - time_column = proto.Field( - proto.STRING, - number=3, - ) - transformations = proto.RepeatedField( - proto.MESSAGE, - number=4, - message=Transformation, - ) - optimization_objective = proto.Field( - proto.STRING, - number=5, - ) - train_budget_milli_node_hours = proto.Field( - proto.INT64, - number=6, - ) - weight_column = proto.Field( - proto.STRING, - number=7, - ) - time_series_attribute_columns = proto.RepeatedField( - proto.STRING, - number=19, - ) - unavailable_at_forecast_columns = proto.RepeatedField( - proto.STRING, - number=20, - ) - available_at_forecast_columns = proto.RepeatedField( - proto.STRING, - number=21, - ) - data_granularity = proto.Field( - proto.MESSAGE, - number=22, - message=Granularity, - ) - forecast_horizon = proto.Field( - proto.INT64, - number=23, - ) - context_window = proto.Field( - proto.INT64, - number=24, - ) - export_evaluated_data_items_config = proto.Field( - proto.MESSAGE, - number=15, - message=gcastd_export_evaluated_data_items_config.ExportEvaluatedDataItemsConfig, - ) - quantiles = proto.RepeatedField( - proto.DOUBLE, - number=16, - ) - validation_options = proto.Field( - proto.STRING, - number=17, - ) - additional_experiments = proto.RepeatedField( - proto.STRING, - number=25, - ) - - -class AutoMlForecastingMetadata(proto.Message): - r"""Model metadata specific to AutoML Forecasting. - - Attributes: - train_cost_milli_node_hours (int): - Output only. The actual training cost of the - model, expressed in milli node hours, i.e. 1,000 - value in this field means 1 node hour. - Guaranteed to not exceed the train budget. 
- """ - - train_cost_milli_node_hours = proto.Field( - proto.INT64, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py deleted file mode 100644 index 593448b799..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_action_recognition.py +++ /dev/null @@ -1,66 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', - manifest={ - 'AutoMlVideoActionRecognition', - 'AutoMlVideoActionRecognitionInputs', - }, -) - - -class AutoMlVideoActionRecognition(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Video Action - Recognition Model. - - Attributes: - inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoActionRecognitionInputs): - The input parameters of this TrainingJob. 
- """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlVideoActionRecognitionInputs', - ) - - -class AutoMlVideoActionRecognitionInputs(proto.Message): - r""" - - Attributes: - model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoActionRecognitionInputs.ModelType): - - """ - class ModelType(proto.Enum): - r"""""" - MODEL_TYPE_UNSPECIFIED = 0 - CLOUD = 1 - MOBILE_VERSATILE_1 = 2 - MOBILE_JETSON_VERSATILE_1 = 3 - MOBILE_CORAL_VERSATILE_1 = 4 - - model_type = proto.Field( - proto.ENUM, - number=1, - enum=ModelType, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py deleted file mode 100644 index cca3bc4e2b..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_classification.py +++ /dev/null @@ -1,65 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', - manifest={ - 'AutoMlVideoClassification', - 'AutoMlVideoClassificationInputs', - }, -) - - -class AutoMlVideoClassification(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Video - Classification Model. - - Attributes: - inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoClassificationInputs): - The input parameters of this TrainingJob. - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlVideoClassificationInputs', - ) - - -class AutoMlVideoClassificationInputs(proto.Message): - r""" - - Attributes: - model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoClassificationInputs.ModelType): - - """ - class ModelType(proto.Enum): - r"""""" - MODEL_TYPE_UNSPECIFIED = 0 - CLOUD = 1 - MOBILE_VERSATILE_1 = 2 - MOBILE_JETSON_VERSATILE_1 = 3 - - model_type = proto.Field( - proto.ENUM, - number=1, - enum=ModelType, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py deleted file mode 100644 index a1a622d0bd..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_video_object_tracking.py +++ /dev/null @@ -1,68 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', - manifest={ - 'AutoMlVideoObjectTracking', - 'AutoMlVideoObjectTrackingInputs', - }, -) - - -class AutoMlVideoObjectTracking(proto.Message): - r"""A TrainingJob that trains and uploads an AutoML Video - ObjectTracking Model. - - Attributes: - inputs (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoObjectTrackingInputs): - The input parameters of this TrainingJob. - """ - - inputs = proto.Field( - proto.MESSAGE, - number=1, - message='AutoMlVideoObjectTrackingInputs', - ) - - -class AutoMlVideoObjectTrackingInputs(proto.Message): - r""" - - Attributes: - model_type (google.cloud.aiplatform.v1beta1.schema.trainingjob.definition_v1beta1.types.AutoMlVideoObjectTrackingInputs.ModelType): - - """ - class ModelType(proto.Enum): - r"""""" - MODEL_TYPE_UNSPECIFIED = 0 - CLOUD = 1 - MOBILE_VERSATILE_1 = 2 - MOBILE_CORAL_VERSATILE_1 = 3 - MOBILE_CORAL_LOW_LATENCY_1 = 4 - MOBILE_JETSON_VERSATILE_1 = 5 - MOBILE_JETSON_LOW_LATENCY_1 = 6 - - model_type = proto.Field( - proto.ENUM, - number=1, - enum=ModelType, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py deleted file mode 100644 index 
f2f42e233b..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/export_evaluated_data_items_config.py +++ /dev/null @@ -1,57 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1.schema.trainingjob.definition', - manifest={ - 'ExportEvaluatedDataItemsConfig', - }, -) - - -class ExportEvaluatedDataItemsConfig(proto.Message): - r"""Configuration for exporting test set predictions to a - BigQuery table. - - Attributes: - destination_bigquery_uri (str): - URI of desired destination BigQuery table. Expected format: - bq://:: - - If not specified, then results are exported to the following - auto-created BigQuery table: - :export_evaluated_examples__.evaluated_examples - override_existing_table (bool): - If true and an export destination is - specified, then the contents of the destination - are overwritten. Otherwise, if the export - destination already exists, then the export - operation fails. 
- """ - - destination_bigquery_uri = proto.Field( - proto.STRING, - number=1, - ) - override_existing_table = proto.Field( - proto.BOOL, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/__init__.py deleted file mode 100644 index f261a04925..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/__init__.py +++ /dev/null @@ -1,932 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from .services.dataset_service import DatasetServiceClient -from .services.dataset_service import DatasetServiceAsyncClient -from .services.endpoint_service import EndpointServiceClient -from .services.endpoint_service import EndpointServiceAsyncClient -from .services.featurestore_online_serving_service import FeaturestoreOnlineServingServiceClient -from .services.featurestore_online_serving_service import FeaturestoreOnlineServingServiceAsyncClient -from .services.featurestore_service import FeaturestoreServiceClient -from .services.featurestore_service import FeaturestoreServiceAsyncClient -from .services.index_endpoint_service import IndexEndpointServiceClient -from .services.index_endpoint_service import IndexEndpointServiceAsyncClient -from .services.index_service import IndexServiceClient -from .services.index_service import IndexServiceAsyncClient -from .services.job_service import JobServiceClient -from .services.job_service import JobServiceAsyncClient -from .services.metadata_service import MetadataServiceClient -from .services.metadata_service import MetadataServiceAsyncClient -from .services.migration_service import MigrationServiceClient -from .services.migration_service import MigrationServiceAsyncClient -from .services.model_service import ModelServiceClient -from .services.model_service import ModelServiceAsyncClient -from .services.pipeline_service import PipelineServiceClient -from .services.pipeline_service import PipelineServiceAsyncClient -from .services.prediction_service import PredictionServiceClient -from .services.prediction_service import PredictionServiceAsyncClient -from .services.specialist_pool_service import SpecialistPoolServiceClient -from .services.specialist_pool_service import SpecialistPoolServiceAsyncClient -from .services.tensorboard_service import TensorboardServiceClient -from .services.tensorboard_service import TensorboardServiceAsyncClient -from .services.vizier_service import VizierServiceClient -from 
.services.vizier_service import VizierServiceAsyncClient - -from .types.accelerator_type import AcceleratorType -from .types.annotation import Annotation -from .types.annotation_spec import AnnotationSpec -from .types.artifact import Artifact -from .types.batch_prediction_job import BatchPredictionJob -from .types.completion_stats import CompletionStats -from .types.context import Context -from .types.custom_job import ContainerSpec -from .types.custom_job import CustomJob -from .types.custom_job import CustomJobSpec -from .types.custom_job import PythonPackageSpec -from .types.custom_job import Scheduling -from .types.custom_job import WorkerPoolSpec -from .types.data_item import DataItem -from .types.data_labeling_job import ActiveLearningConfig -from .types.data_labeling_job import DataLabelingJob -from .types.data_labeling_job import SampleConfig -from .types.data_labeling_job import TrainingConfig -from .types.dataset import Dataset -from .types.dataset import ExportDataConfig -from .types.dataset import ImportDataConfig -from .types.dataset_service import CreateDatasetOperationMetadata -from .types.dataset_service import CreateDatasetRequest -from .types.dataset_service import DeleteDatasetRequest -from .types.dataset_service import ExportDataOperationMetadata -from .types.dataset_service import ExportDataRequest -from .types.dataset_service import ExportDataResponse -from .types.dataset_service import GetAnnotationSpecRequest -from .types.dataset_service import GetDatasetRequest -from .types.dataset_service import ImportDataOperationMetadata -from .types.dataset_service import ImportDataRequest -from .types.dataset_service import ImportDataResponse -from .types.dataset_service import ListAnnotationsRequest -from .types.dataset_service import ListAnnotationsResponse -from .types.dataset_service import ListDataItemsRequest -from .types.dataset_service import ListDataItemsResponse -from .types.dataset_service import ListDatasetsRequest -from 
.types.dataset_service import ListDatasetsResponse -from .types.dataset_service import UpdateDatasetRequest -from .types.deployed_index_ref import DeployedIndexRef -from .types.deployed_model_ref import DeployedModelRef -from .types.encryption_spec import EncryptionSpec -from .types.endpoint import DeployedModel -from .types.endpoint import Endpoint -from .types.endpoint import PrivateEndpoints -from .types.endpoint_service import CreateEndpointOperationMetadata -from .types.endpoint_service import CreateEndpointRequest -from .types.endpoint_service import DeleteEndpointRequest -from .types.endpoint_service import DeployModelOperationMetadata -from .types.endpoint_service import DeployModelRequest -from .types.endpoint_service import DeployModelResponse -from .types.endpoint_service import GetEndpointRequest -from .types.endpoint_service import ListEndpointsRequest -from .types.endpoint_service import ListEndpointsResponse -from .types.endpoint_service import UndeployModelOperationMetadata -from .types.endpoint_service import UndeployModelRequest -from .types.endpoint_service import UndeployModelResponse -from .types.endpoint_service import UpdateEndpointRequest -from .types.entity_type import EntityType -from .types.env_var import EnvVar -from .types.event import Event -from .types.execution import Execution -from .types.explanation import Attribution -from .types.explanation import BlurBaselineConfig -from .types.explanation import Explanation -from .types.explanation import ExplanationMetadataOverride -from .types.explanation import ExplanationParameters -from .types.explanation import ExplanationSpec -from .types.explanation import ExplanationSpecOverride -from .types.explanation import FeatureNoiseSigma -from .types.explanation import IntegratedGradientsAttribution -from .types.explanation import ModelExplanation -from .types.explanation import SampledShapleyAttribution -from .types.explanation import Similarity -from .types.explanation import SmoothGradConfig 
-from .types.explanation import XraiAttribution -from .types.explanation_metadata import ExplanationMetadata -from .types.feature import Feature -from .types.feature_monitoring_stats import FeatureStatsAnomaly -from .types.feature_selector import FeatureSelector -from .types.feature_selector import IdMatcher -from .types.featurestore import Featurestore -from .types.featurestore_monitoring import FeaturestoreMonitoringConfig -from .types.featurestore_online_service import FeatureValue -from .types.featurestore_online_service import FeatureValueList -from .types.featurestore_online_service import ReadFeatureValuesRequest -from .types.featurestore_online_service import ReadFeatureValuesResponse -from .types.featurestore_online_service import StreamingReadFeatureValuesRequest -from .types.featurestore_service import BatchCreateFeaturesOperationMetadata -from .types.featurestore_service import BatchCreateFeaturesRequest -from .types.featurestore_service import BatchCreateFeaturesResponse -from .types.featurestore_service import BatchReadFeatureValuesOperationMetadata -from .types.featurestore_service import BatchReadFeatureValuesRequest -from .types.featurestore_service import BatchReadFeatureValuesResponse -from .types.featurestore_service import CreateEntityTypeOperationMetadata -from .types.featurestore_service import CreateEntityTypeRequest -from .types.featurestore_service import CreateFeatureOperationMetadata -from .types.featurestore_service import CreateFeatureRequest -from .types.featurestore_service import CreateFeaturestoreOperationMetadata -from .types.featurestore_service import CreateFeaturestoreRequest -from .types.featurestore_service import DeleteEntityTypeRequest -from .types.featurestore_service import DeleteFeatureRequest -from .types.featurestore_service import DeleteFeaturestoreRequest -from .types.featurestore_service import DestinationFeatureSetting -from .types.featurestore_service import ExportFeatureValuesOperationMetadata -from 
.types.featurestore_service import ExportFeatureValuesRequest -from .types.featurestore_service import ExportFeatureValuesResponse -from .types.featurestore_service import FeatureValueDestination -from .types.featurestore_service import GetEntityTypeRequest -from .types.featurestore_service import GetFeatureRequest -from .types.featurestore_service import GetFeaturestoreRequest -from .types.featurestore_service import ImportFeatureValuesOperationMetadata -from .types.featurestore_service import ImportFeatureValuesRequest -from .types.featurestore_service import ImportFeatureValuesResponse -from .types.featurestore_service import ListEntityTypesRequest -from .types.featurestore_service import ListEntityTypesResponse -from .types.featurestore_service import ListFeaturesRequest -from .types.featurestore_service import ListFeaturesResponse -from .types.featurestore_service import ListFeaturestoresRequest -from .types.featurestore_service import ListFeaturestoresResponse -from .types.featurestore_service import SearchFeaturesRequest -from .types.featurestore_service import SearchFeaturesResponse -from .types.featurestore_service import UpdateEntityTypeRequest -from .types.featurestore_service import UpdateFeatureRequest -from .types.featurestore_service import UpdateFeaturestoreOperationMetadata -from .types.featurestore_service import UpdateFeaturestoreRequest -from .types.hyperparameter_tuning_job import HyperparameterTuningJob -from .types.index import Index -from .types.index_endpoint import DeployedIndex -from .types.index_endpoint import DeployedIndexAuthConfig -from .types.index_endpoint import IndexEndpoint -from .types.index_endpoint import IndexPrivateEndpoints -from .types.index_endpoint_service import CreateIndexEndpointOperationMetadata -from .types.index_endpoint_service import CreateIndexEndpointRequest -from .types.index_endpoint_service import DeleteIndexEndpointRequest -from .types.index_endpoint_service import DeployIndexOperationMetadata -from 
.types.index_endpoint_service import DeployIndexRequest -from .types.index_endpoint_service import DeployIndexResponse -from .types.index_endpoint_service import GetIndexEndpointRequest -from .types.index_endpoint_service import ListIndexEndpointsRequest -from .types.index_endpoint_service import ListIndexEndpointsResponse -from .types.index_endpoint_service import MutateDeployedIndexOperationMetadata -from .types.index_endpoint_service import MutateDeployedIndexRequest -from .types.index_endpoint_service import MutateDeployedIndexResponse -from .types.index_endpoint_service import UndeployIndexOperationMetadata -from .types.index_endpoint_service import UndeployIndexRequest -from .types.index_endpoint_service import UndeployIndexResponse -from .types.index_endpoint_service import UpdateIndexEndpointRequest -from .types.index_service import CreateIndexOperationMetadata -from .types.index_service import CreateIndexRequest -from .types.index_service import DeleteIndexRequest -from .types.index_service import GetIndexRequest -from .types.index_service import ListIndexesRequest -from .types.index_service import ListIndexesResponse -from .types.index_service import NearestNeighborSearchOperationMetadata -from .types.index_service import UpdateIndexOperationMetadata -from .types.index_service import UpdateIndexRequest -from .types.io import AvroSource -from .types.io import BigQueryDestination -from .types.io import BigQuerySource -from .types.io import ContainerRegistryDestination -from .types.io import CsvDestination -from .types.io import CsvSource -from .types.io import GcsDestination -from .types.io import GcsSource -from .types.io import TFRecordDestination -from .types.job_service import CancelBatchPredictionJobRequest -from .types.job_service import CancelCustomJobRequest -from .types.job_service import CancelDataLabelingJobRequest -from .types.job_service import CancelHyperparameterTuningJobRequest -from .types.job_service import CreateBatchPredictionJobRequest 
-from .types.job_service import CreateCustomJobRequest -from .types.job_service import CreateDataLabelingJobRequest -from .types.job_service import CreateHyperparameterTuningJobRequest -from .types.job_service import CreateModelDeploymentMonitoringJobRequest -from .types.job_service import DeleteBatchPredictionJobRequest -from .types.job_service import DeleteCustomJobRequest -from .types.job_service import DeleteDataLabelingJobRequest -from .types.job_service import DeleteHyperparameterTuningJobRequest -from .types.job_service import DeleteModelDeploymentMonitoringJobRequest -from .types.job_service import GetBatchPredictionJobRequest -from .types.job_service import GetCustomJobRequest -from .types.job_service import GetDataLabelingJobRequest -from .types.job_service import GetHyperparameterTuningJobRequest -from .types.job_service import GetModelDeploymentMonitoringJobRequest -from .types.job_service import ListBatchPredictionJobsRequest -from .types.job_service import ListBatchPredictionJobsResponse -from .types.job_service import ListCustomJobsRequest -from .types.job_service import ListCustomJobsResponse -from .types.job_service import ListDataLabelingJobsRequest -from .types.job_service import ListDataLabelingJobsResponse -from .types.job_service import ListHyperparameterTuningJobsRequest -from .types.job_service import ListHyperparameterTuningJobsResponse -from .types.job_service import ListModelDeploymentMonitoringJobsRequest -from .types.job_service import ListModelDeploymentMonitoringJobsResponse -from .types.job_service import PauseModelDeploymentMonitoringJobRequest -from .types.job_service import ResumeModelDeploymentMonitoringJobRequest -from .types.job_service import SearchModelDeploymentMonitoringStatsAnomaliesRequest -from .types.job_service import SearchModelDeploymentMonitoringStatsAnomaliesResponse -from .types.job_service import UpdateModelDeploymentMonitoringJobOperationMetadata -from .types.job_service import 
UpdateModelDeploymentMonitoringJobRequest -from .types.job_state import JobState -from .types.lineage_subgraph import LineageSubgraph -from .types.machine_resources import AutomaticResources -from .types.machine_resources import AutoscalingMetricSpec -from .types.machine_resources import BatchDedicatedResources -from .types.machine_resources import DedicatedResources -from .types.machine_resources import DiskSpec -from .types.machine_resources import MachineSpec -from .types.machine_resources import ResourcesConsumed -from .types.manual_batch_tuning_parameters import ManualBatchTuningParameters -from .types.metadata_schema import MetadataSchema -from .types.metadata_service import AddContextArtifactsAndExecutionsRequest -from .types.metadata_service import AddContextArtifactsAndExecutionsResponse -from .types.metadata_service import AddContextChildrenRequest -from .types.metadata_service import AddContextChildrenResponse -from .types.metadata_service import AddExecutionEventsRequest -from .types.metadata_service import AddExecutionEventsResponse -from .types.metadata_service import CreateArtifactRequest -from .types.metadata_service import CreateContextRequest -from .types.metadata_service import CreateExecutionRequest -from .types.metadata_service import CreateMetadataSchemaRequest -from .types.metadata_service import CreateMetadataStoreOperationMetadata -from .types.metadata_service import CreateMetadataStoreRequest -from .types.metadata_service import DeleteArtifactRequest -from .types.metadata_service import DeleteContextRequest -from .types.metadata_service import DeleteExecutionRequest -from .types.metadata_service import DeleteMetadataStoreOperationMetadata -from .types.metadata_service import DeleteMetadataStoreRequest -from .types.metadata_service import GetArtifactRequest -from .types.metadata_service import GetContextRequest -from .types.metadata_service import GetExecutionRequest -from .types.metadata_service import GetMetadataSchemaRequest -from 
.types.metadata_service import GetMetadataStoreRequest -from .types.metadata_service import ListArtifactsRequest -from .types.metadata_service import ListArtifactsResponse -from .types.metadata_service import ListContextsRequest -from .types.metadata_service import ListContextsResponse -from .types.metadata_service import ListExecutionsRequest -from .types.metadata_service import ListExecutionsResponse -from .types.metadata_service import ListMetadataSchemasRequest -from .types.metadata_service import ListMetadataSchemasResponse -from .types.metadata_service import ListMetadataStoresRequest -from .types.metadata_service import ListMetadataStoresResponse -from .types.metadata_service import PurgeArtifactsMetadata -from .types.metadata_service import PurgeArtifactsRequest -from .types.metadata_service import PurgeArtifactsResponse -from .types.metadata_service import PurgeContextsMetadata -from .types.metadata_service import PurgeContextsRequest -from .types.metadata_service import PurgeContextsResponse -from .types.metadata_service import PurgeExecutionsMetadata -from .types.metadata_service import PurgeExecutionsRequest -from .types.metadata_service import PurgeExecutionsResponse -from .types.metadata_service import QueryArtifactLineageSubgraphRequest -from .types.metadata_service import QueryContextLineageSubgraphRequest -from .types.metadata_service import QueryExecutionInputsAndOutputsRequest -from .types.metadata_service import UpdateArtifactRequest -from .types.metadata_service import UpdateContextRequest -from .types.metadata_service import UpdateExecutionRequest -from .types.metadata_store import MetadataStore -from .types.migratable_resource import MigratableResource -from .types.migration_service import BatchMigrateResourcesOperationMetadata -from .types.migration_service import BatchMigrateResourcesRequest -from .types.migration_service import BatchMigrateResourcesResponse -from .types.migration_service import MigrateResourceRequest -from 
.types.migration_service import MigrateResourceResponse -from .types.migration_service import SearchMigratableResourcesRequest -from .types.migration_service import SearchMigratableResourcesResponse -from .types.model import Model -from .types.model import ModelContainerSpec -from .types.model import Port -from .types.model import PredictSchemata -from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringBigQueryTable -from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringJob -from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringObjectiveConfig -from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringScheduleConfig -from .types.model_deployment_monitoring_job import ModelMonitoringStatsAnomalies -from .types.model_deployment_monitoring_job import ModelDeploymentMonitoringObjectiveType -from .types.model_evaluation import ModelEvaluation -from .types.model_evaluation_slice import ModelEvaluationSlice -from .types.model_monitoring import ModelMonitoringAlertConfig -from .types.model_monitoring import ModelMonitoringObjectiveConfig -from .types.model_monitoring import SamplingStrategy -from .types.model_monitoring import ThresholdConfig -from .types.model_service import DeleteModelRequest -from .types.model_service import ExportModelOperationMetadata -from .types.model_service import ExportModelRequest -from .types.model_service import ExportModelResponse -from .types.model_service import GetModelEvaluationRequest -from .types.model_service import GetModelEvaluationSliceRequest -from .types.model_service import GetModelRequest -from .types.model_service import ListModelEvaluationSlicesRequest -from .types.model_service import ListModelEvaluationSlicesResponse -from .types.model_service import ListModelEvaluationsRequest -from .types.model_service import ListModelEvaluationsResponse -from .types.model_service import ListModelsRequest -from .types.model_service import ListModelsResponse 
-from .types.model_service import UpdateModelRequest -from .types.model_service import UploadModelOperationMetadata -from .types.model_service import UploadModelRequest -from .types.model_service import UploadModelResponse -from .types.operation import DeleteOperationMetadata -from .types.operation import GenericOperationMetadata -from .types.pipeline_job import PipelineJob -from .types.pipeline_job import PipelineJobDetail -from .types.pipeline_job import PipelineTaskDetail -from .types.pipeline_job import PipelineTaskExecutorDetail -from .types.pipeline_service import CancelPipelineJobRequest -from .types.pipeline_service import CancelTrainingPipelineRequest -from .types.pipeline_service import CreatePipelineJobRequest -from .types.pipeline_service import CreateTrainingPipelineRequest -from .types.pipeline_service import DeletePipelineJobRequest -from .types.pipeline_service import DeleteTrainingPipelineRequest -from .types.pipeline_service import GetPipelineJobRequest -from .types.pipeline_service import GetTrainingPipelineRequest -from .types.pipeline_service import ListPipelineJobsRequest -from .types.pipeline_service import ListPipelineJobsResponse -from .types.pipeline_service import ListTrainingPipelinesRequest -from .types.pipeline_service import ListTrainingPipelinesResponse -from .types.pipeline_state import PipelineState -from .types.prediction_service import ExplainRequest -from .types.prediction_service import ExplainResponse -from .types.prediction_service import PredictRequest -from .types.prediction_service import PredictResponse -from .types.prediction_service import RawPredictRequest -from .types.specialist_pool import SpecialistPool -from .types.specialist_pool_service import CreateSpecialistPoolOperationMetadata -from .types.specialist_pool_service import CreateSpecialistPoolRequest -from .types.specialist_pool_service import DeleteSpecialistPoolRequest -from .types.specialist_pool_service import GetSpecialistPoolRequest -from 
.types.specialist_pool_service import ListSpecialistPoolsRequest -from .types.specialist_pool_service import ListSpecialistPoolsResponse -from .types.specialist_pool_service import UpdateSpecialistPoolOperationMetadata -from .types.specialist_pool_service import UpdateSpecialistPoolRequest -from .types.study import Measurement -from .types.study import Study -from .types.study import StudySpec -from .types.study import Trial -from .types.tensorboard import Tensorboard -from .types.tensorboard_data import Scalar -from .types.tensorboard_data import TensorboardBlob -from .types.tensorboard_data import TensorboardBlobSequence -from .types.tensorboard_data import TensorboardTensor -from .types.tensorboard_data import TimeSeriesData -from .types.tensorboard_data import TimeSeriesDataPoint -from .types.tensorboard_experiment import TensorboardExperiment -from .types.tensorboard_run import TensorboardRun -from .types.tensorboard_service import BatchCreateTensorboardRunsRequest -from .types.tensorboard_service import BatchCreateTensorboardRunsResponse -from .types.tensorboard_service import BatchCreateTensorboardTimeSeriesRequest -from .types.tensorboard_service import BatchCreateTensorboardTimeSeriesResponse -from .types.tensorboard_service import BatchReadTensorboardTimeSeriesDataRequest -from .types.tensorboard_service import BatchReadTensorboardTimeSeriesDataResponse -from .types.tensorboard_service import CreateTensorboardExperimentRequest -from .types.tensorboard_service import CreateTensorboardOperationMetadata -from .types.tensorboard_service import CreateTensorboardRequest -from .types.tensorboard_service import CreateTensorboardRunRequest -from .types.tensorboard_service import CreateTensorboardTimeSeriesRequest -from .types.tensorboard_service import DeleteTensorboardExperimentRequest -from .types.tensorboard_service import DeleteTensorboardRequest -from .types.tensorboard_service import DeleteTensorboardRunRequest -from .types.tensorboard_service import 
DeleteTensorboardTimeSeriesRequest -from .types.tensorboard_service import ExportTensorboardTimeSeriesDataRequest -from .types.tensorboard_service import ExportTensorboardTimeSeriesDataResponse -from .types.tensorboard_service import GetTensorboardExperimentRequest -from .types.tensorboard_service import GetTensorboardRequest -from .types.tensorboard_service import GetTensorboardRunRequest -from .types.tensorboard_service import GetTensorboardTimeSeriesRequest -from .types.tensorboard_service import ListTensorboardExperimentsRequest -from .types.tensorboard_service import ListTensorboardExperimentsResponse -from .types.tensorboard_service import ListTensorboardRunsRequest -from .types.tensorboard_service import ListTensorboardRunsResponse -from .types.tensorboard_service import ListTensorboardsRequest -from .types.tensorboard_service import ListTensorboardsResponse -from .types.tensorboard_service import ListTensorboardTimeSeriesRequest -from .types.tensorboard_service import ListTensorboardTimeSeriesResponse -from .types.tensorboard_service import ReadTensorboardBlobDataRequest -from .types.tensorboard_service import ReadTensorboardBlobDataResponse -from .types.tensorboard_service import ReadTensorboardTimeSeriesDataRequest -from .types.tensorboard_service import ReadTensorboardTimeSeriesDataResponse -from .types.tensorboard_service import UpdateTensorboardExperimentRequest -from .types.tensorboard_service import UpdateTensorboardOperationMetadata -from .types.tensorboard_service import UpdateTensorboardRequest -from .types.tensorboard_service import UpdateTensorboardRunRequest -from .types.tensorboard_service import UpdateTensorboardTimeSeriesRequest -from .types.tensorboard_service import WriteTensorboardExperimentDataRequest -from .types.tensorboard_service import WriteTensorboardExperimentDataResponse -from .types.tensorboard_service import WriteTensorboardRunDataRequest -from .types.tensorboard_service import WriteTensorboardRunDataResponse -from 
.types.tensorboard_time_series import TensorboardTimeSeries -from .types.training_pipeline import FilterSplit -from .types.training_pipeline import FractionSplit -from .types.training_pipeline import InputDataConfig -from .types.training_pipeline import PredefinedSplit -from .types.training_pipeline import StratifiedSplit -from .types.training_pipeline import TimestampSplit -from .types.training_pipeline import TrainingPipeline -from .types.types import BoolArray -from .types.types import DoubleArray -from .types.types import Int64Array -from .types.types import StringArray -from .types.unmanaged_container_model import UnmanagedContainerModel -from .types.user_action_reference import UserActionReference -from .types.value import Value -from .types.vizier_service import AddTrialMeasurementRequest -from .types.vizier_service import CheckTrialEarlyStoppingStateMetatdata -from .types.vizier_service import CheckTrialEarlyStoppingStateRequest -from .types.vizier_service import CheckTrialEarlyStoppingStateResponse -from .types.vizier_service import CompleteTrialRequest -from .types.vizier_service import CreateStudyRequest -from .types.vizier_service import CreateTrialRequest -from .types.vizier_service import DeleteStudyRequest -from .types.vizier_service import DeleteTrialRequest -from .types.vizier_service import GetStudyRequest -from .types.vizier_service import GetTrialRequest -from .types.vizier_service import ListOptimalTrialsRequest -from .types.vizier_service import ListOptimalTrialsResponse -from .types.vizier_service import ListStudiesRequest -from .types.vizier_service import ListStudiesResponse -from .types.vizier_service import ListTrialsRequest -from .types.vizier_service import ListTrialsResponse -from .types.vizier_service import LookupStudyRequest -from .types.vizier_service import StopTrialRequest -from .types.vizier_service import SuggestTrialsMetadata -from .types.vizier_service import SuggestTrialsRequest -from .types.vizier_service import 
SuggestTrialsResponse - -__all__ = ( - 'DatasetServiceAsyncClient', - 'EndpointServiceAsyncClient', - 'FeaturestoreOnlineServingServiceAsyncClient', - 'FeaturestoreServiceAsyncClient', - 'IndexEndpointServiceAsyncClient', - 'IndexServiceAsyncClient', - 'JobServiceAsyncClient', - 'MetadataServiceAsyncClient', - 'MigrationServiceAsyncClient', - 'ModelServiceAsyncClient', - 'PipelineServiceAsyncClient', - 'PredictionServiceAsyncClient', - 'SpecialistPoolServiceAsyncClient', - 'TensorboardServiceAsyncClient', - 'VizierServiceAsyncClient', -'AcceleratorType', -'ActiveLearningConfig', -'AddContextArtifactsAndExecutionsRequest', -'AddContextArtifactsAndExecutionsResponse', -'AddContextChildrenRequest', -'AddContextChildrenResponse', -'AddExecutionEventsRequest', -'AddExecutionEventsResponse', -'AddTrialMeasurementRequest', -'Annotation', -'AnnotationSpec', -'Artifact', -'Attribution', -'AutomaticResources', -'AutoscalingMetricSpec', -'AvroSource', -'BatchCreateFeaturesOperationMetadata', -'BatchCreateFeaturesRequest', -'BatchCreateFeaturesResponse', -'BatchCreateTensorboardRunsRequest', -'BatchCreateTensorboardRunsResponse', -'BatchCreateTensorboardTimeSeriesRequest', -'BatchCreateTensorboardTimeSeriesResponse', -'BatchDedicatedResources', -'BatchMigrateResourcesOperationMetadata', -'BatchMigrateResourcesRequest', -'BatchMigrateResourcesResponse', -'BatchPredictionJob', -'BatchReadFeatureValuesOperationMetadata', -'BatchReadFeatureValuesRequest', -'BatchReadFeatureValuesResponse', -'BatchReadTensorboardTimeSeriesDataRequest', -'BatchReadTensorboardTimeSeriesDataResponse', -'BigQueryDestination', -'BigQuerySource', -'BlurBaselineConfig', -'BoolArray', -'CancelBatchPredictionJobRequest', -'CancelCustomJobRequest', -'CancelDataLabelingJobRequest', -'CancelHyperparameterTuningJobRequest', -'CancelPipelineJobRequest', -'CancelTrainingPipelineRequest', -'CheckTrialEarlyStoppingStateMetatdata', -'CheckTrialEarlyStoppingStateRequest', -'CheckTrialEarlyStoppingStateResponse', 
-'CompleteTrialRequest', -'CompletionStats', -'ContainerRegistryDestination', -'ContainerSpec', -'Context', -'CreateArtifactRequest', -'CreateBatchPredictionJobRequest', -'CreateContextRequest', -'CreateCustomJobRequest', -'CreateDataLabelingJobRequest', -'CreateDatasetOperationMetadata', -'CreateDatasetRequest', -'CreateEndpointOperationMetadata', -'CreateEndpointRequest', -'CreateEntityTypeOperationMetadata', -'CreateEntityTypeRequest', -'CreateExecutionRequest', -'CreateFeatureOperationMetadata', -'CreateFeatureRequest', -'CreateFeaturestoreOperationMetadata', -'CreateFeaturestoreRequest', -'CreateHyperparameterTuningJobRequest', -'CreateIndexEndpointOperationMetadata', -'CreateIndexEndpointRequest', -'CreateIndexOperationMetadata', -'CreateIndexRequest', -'CreateMetadataSchemaRequest', -'CreateMetadataStoreOperationMetadata', -'CreateMetadataStoreRequest', -'CreateModelDeploymentMonitoringJobRequest', -'CreatePipelineJobRequest', -'CreateSpecialistPoolOperationMetadata', -'CreateSpecialistPoolRequest', -'CreateStudyRequest', -'CreateTensorboardExperimentRequest', -'CreateTensorboardOperationMetadata', -'CreateTensorboardRequest', -'CreateTensorboardRunRequest', -'CreateTensorboardTimeSeriesRequest', -'CreateTrainingPipelineRequest', -'CreateTrialRequest', -'CsvDestination', -'CsvSource', -'CustomJob', -'CustomJobSpec', -'DataItem', -'DataLabelingJob', -'Dataset', -'DatasetServiceClient', -'DedicatedResources', -'DeleteArtifactRequest', -'DeleteBatchPredictionJobRequest', -'DeleteContextRequest', -'DeleteCustomJobRequest', -'DeleteDataLabelingJobRequest', -'DeleteDatasetRequest', -'DeleteEndpointRequest', -'DeleteEntityTypeRequest', -'DeleteExecutionRequest', -'DeleteFeatureRequest', -'DeleteFeaturestoreRequest', -'DeleteHyperparameterTuningJobRequest', -'DeleteIndexEndpointRequest', -'DeleteIndexRequest', -'DeleteMetadataStoreOperationMetadata', -'DeleteMetadataStoreRequest', -'DeleteModelDeploymentMonitoringJobRequest', -'DeleteModelRequest', 
-'DeleteOperationMetadata', -'DeletePipelineJobRequest', -'DeleteSpecialistPoolRequest', -'DeleteStudyRequest', -'DeleteTensorboardExperimentRequest', -'DeleteTensorboardRequest', -'DeleteTensorboardRunRequest', -'DeleteTensorboardTimeSeriesRequest', -'DeleteTrainingPipelineRequest', -'DeleteTrialRequest', -'DeployIndexOperationMetadata', -'DeployIndexRequest', -'DeployIndexResponse', -'DeployModelOperationMetadata', -'DeployModelRequest', -'DeployModelResponse', -'DeployedIndex', -'DeployedIndexAuthConfig', -'DeployedIndexRef', -'DeployedModel', -'DeployedModelRef', -'DestinationFeatureSetting', -'DiskSpec', -'DoubleArray', -'EncryptionSpec', -'Endpoint', -'EndpointServiceClient', -'EntityType', -'EnvVar', -'Event', -'Execution', -'ExplainRequest', -'ExplainResponse', -'Explanation', -'ExplanationMetadata', -'ExplanationMetadataOverride', -'ExplanationParameters', -'ExplanationSpec', -'ExplanationSpecOverride', -'ExportDataConfig', -'ExportDataOperationMetadata', -'ExportDataRequest', -'ExportDataResponse', -'ExportFeatureValuesOperationMetadata', -'ExportFeatureValuesRequest', -'ExportFeatureValuesResponse', -'ExportModelOperationMetadata', -'ExportModelRequest', -'ExportModelResponse', -'ExportTensorboardTimeSeriesDataRequest', -'ExportTensorboardTimeSeriesDataResponse', -'Feature', -'FeatureNoiseSigma', -'FeatureSelector', -'FeatureStatsAnomaly', -'FeatureValue', -'FeatureValueDestination', -'FeatureValueList', -'Featurestore', -'FeaturestoreMonitoringConfig', -'FeaturestoreOnlineServingServiceClient', -'FeaturestoreServiceClient', -'FilterSplit', -'FractionSplit', -'GcsDestination', -'GcsSource', -'GenericOperationMetadata', -'GetAnnotationSpecRequest', -'GetArtifactRequest', -'GetBatchPredictionJobRequest', -'GetContextRequest', -'GetCustomJobRequest', -'GetDataLabelingJobRequest', -'GetDatasetRequest', -'GetEndpointRequest', -'GetEntityTypeRequest', -'GetExecutionRequest', -'GetFeatureRequest', -'GetFeaturestoreRequest', -'GetHyperparameterTuningJobRequest', 
-'GetIndexEndpointRequest', -'GetIndexRequest', -'GetMetadataSchemaRequest', -'GetMetadataStoreRequest', -'GetModelDeploymentMonitoringJobRequest', -'GetModelEvaluationRequest', -'GetModelEvaluationSliceRequest', -'GetModelRequest', -'GetPipelineJobRequest', -'GetSpecialistPoolRequest', -'GetStudyRequest', -'GetTensorboardExperimentRequest', -'GetTensorboardRequest', -'GetTensorboardRunRequest', -'GetTensorboardTimeSeriesRequest', -'GetTrainingPipelineRequest', -'GetTrialRequest', -'HyperparameterTuningJob', -'IdMatcher', -'ImportDataConfig', -'ImportDataOperationMetadata', -'ImportDataRequest', -'ImportDataResponse', -'ImportFeatureValuesOperationMetadata', -'ImportFeatureValuesRequest', -'ImportFeatureValuesResponse', -'Index', -'IndexEndpoint', -'IndexEndpointServiceClient', -'IndexPrivateEndpoints', -'IndexServiceClient', -'InputDataConfig', -'Int64Array', -'IntegratedGradientsAttribution', -'JobServiceClient', -'JobState', -'LineageSubgraph', -'ListAnnotationsRequest', -'ListAnnotationsResponse', -'ListArtifactsRequest', -'ListArtifactsResponse', -'ListBatchPredictionJobsRequest', -'ListBatchPredictionJobsResponse', -'ListContextsRequest', -'ListContextsResponse', -'ListCustomJobsRequest', -'ListCustomJobsResponse', -'ListDataItemsRequest', -'ListDataItemsResponse', -'ListDataLabelingJobsRequest', -'ListDataLabelingJobsResponse', -'ListDatasetsRequest', -'ListDatasetsResponse', -'ListEndpointsRequest', -'ListEndpointsResponse', -'ListEntityTypesRequest', -'ListEntityTypesResponse', -'ListExecutionsRequest', -'ListExecutionsResponse', -'ListFeaturesRequest', -'ListFeaturesResponse', -'ListFeaturestoresRequest', -'ListFeaturestoresResponse', -'ListHyperparameterTuningJobsRequest', -'ListHyperparameterTuningJobsResponse', -'ListIndexEndpointsRequest', -'ListIndexEndpointsResponse', -'ListIndexesRequest', -'ListIndexesResponse', -'ListMetadataSchemasRequest', -'ListMetadataSchemasResponse', -'ListMetadataStoresRequest', -'ListMetadataStoresResponse', 
-'ListModelDeploymentMonitoringJobsRequest', -'ListModelDeploymentMonitoringJobsResponse', -'ListModelEvaluationSlicesRequest', -'ListModelEvaluationSlicesResponse', -'ListModelEvaluationsRequest', -'ListModelEvaluationsResponse', -'ListModelsRequest', -'ListModelsResponse', -'ListOptimalTrialsRequest', -'ListOptimalTrialsResponse', -'ListPipelineJobsRequest', -'ListPipelineJobsResponse', -'ListSpecialistPoolsRequest', -'ListSpecialistPoolsResponse', -'ListStudiesRequest', -'ListStudiesResponse', -'ListTensorboardExperimentsRequest', -'ListTensorboardExperimentsResponse', -'ListTensorboardRunsRequest', -'ListTensorboardRunsResponse', -'ListTensorboardTimeSeriesRequest', -'ListTensorboardTimeSeriesResponse', -'ListTensorboardsRequest', -'ListTensorboardsResponse', -'ListTrainingPipelinesRequest', -'ListTrainingPipelinesResponse', -'ListTrialsRequest', -'ListTrialsResponse', -'LookupStudyRequest', -'MachineSpec', -'ManualBatchTuningParameters', -'Measurement', -'MetadataSchema', -'MetadataServiceClient', -'MetadataStore', -'MigratableResource', -'MigrateResourceRequest', -'MigrateResourceResponse', -'MigrationServiceClient', -'Model', -'ModelContainerSpec', -'ModelDeploymentMonitoringBigQueryTable', -'ModelDeploymentMonitoringJob', -'ModelDeploymentMonitoringObjectiveConfig', -'ModelDeploymentMonitoringObjectiveType', -'ModelDeploymentMonitoringScheduleConfig', -'ModelEvaluation', -'ModelEvaluationSlice', -'ModelExplanation', -'ModelMonitoringAlertConfig', -'ModelMonitoringObjectiveConfig', -'ModelMonitoringStatsAnomalies', -'ModelServiceClient', -'MutateDeployedIndexOperationMetadata', -'MutateDeployedIndexRequest', -'MutateDeployedIndexResponse', -'NearestNeighborSearchOperationMetadata', -'PauseModelDeploymentMonitoringJobRequest', -'PipelineJob', -'PipelineJobDetail', -'PipelineServiceClient', -'PipelineState', -'PipelineTaskDetail', -'PipelineTaskExecutorDetail', -'Port', -'PredefinedSplit', -'PredictRequest', -'PredictResponse', -'PredictSchemata', 
-'PredictionServiceClient', -'PrivateEndpoints', -'PurgeArtifactsMetadata', -'PurgeArtifactsRequest', -'PurgeArtifactsResponse', -'PurgeContextsMetadata', -'PurgeContextsRequest', -'PurgeContextsResponse', -'PurgeExecutionsMetadata', -'PurgeExecutionsRequest', -'PurgeExecutionsResponse', -'PythonPackageSpec', -'QueryArtifactLineageSubgraphRequest', -'QueryContextLineageSubgraphRequest', -'QueryExecutionInputsAndOutputsRequest', -'RawPredictRequest', -'ReadFeatureValuesRequest', -'ReadFeatureValuesResponse', -'ReadTensorboardBlobDataRequest', -'ReadTensorboardBlobDataResponse', -'ReadTensorboardTimeSeriesDataRequest', -'ReadTensorboardTimeSeriesDataResponse', -'ResourcesConsumed', -'ResumeModelDeploymentMonitoringJobRequest', -'SampleConfig', -'SampledShapleyAttribution', -'SamplingStrategy', -'Scalar', -'Scheduling', -'SearchFeaturesRequest', -'SearchFeaturesResponse', -'SearchMigratableResourcesRequest', -'SearchMigratableResourcesResponse', -'SearchModelDeploymentMonitoringStatsAnomaliesRequest', -'SearchModelDeploymentMonitoringStatsAnomaliesResponse', -'Similarity', -'SmoothGradConfig', -'SpecialistPool', -'SpecialistPoolServiceClient', -'StopTrialRequest', -'StratifiedSplit', -'StreamingReadFeatureValuesRequest', -'StringArray', -'Study', -'StudySpec', -'SuggestTrialsMetadata', -'SuggestTrialsRequest', -'SuggestTrialsResponse', -'TFRecordDestination', -'Tensorboard', -'TensorboardBlob', -'TensorboardBlobSequence', -'TensorboardExperiment', -'TensorboardRun', -'TensorboardServiceClient', -'TensorboardTensor', -'TensorboardTimeSeries', -'ThresholdConfig', -'TimeSeriesData', -'TimeSeriesDataPoint', -'TimestampSplit', -'TrainingConfig', -'TrainingPipeline', -'Trial', -'UndeployIndexOperationMetadata', -'UndeployIndexRequest', -'UndeployIndexResponse', -'UndeployModelOperationMetadata', -'UndeployModelRequest', -'UndeployModelResponse', -'UnmanagedContainerModel', -'UpdateArtifactRequest', -'UpdateContextRequest', -'UpdateDatasetRequest', -'UpdateEndpointRequest', 
-'UpdateEntityTypeRequest', -'UpdateExecutionRequest', -'UpdateFeatureRequest', -'UpdateFeaturestoreOperationMetadata', -'UpdateFeaturestoreRequest', -'UpdateIndexEndpointRequest', -'UpdateIndexOperationMetadata', -'UpdateIndexRequest', -'UpdateModelDeploymentMonitoringJobOperationMetadata', -'UpdateModelDeploymentMonitoringJobRequest', -'UpdateModelRequest', -'UpdateSpecialistPoolOperationMetadata', -'UpdateSpecialistPoolRequest', -'UpdateTensorboardExperimentRequest', -'UpdateTensorboardOperationMetadata', -'UpdateTensorboardRequest', -'UpdateTensorboardRunRequest', -'UpdateTensorboardTimeSeriesRequest', -'UploadModelOperationMetadata', -'UploadModelRequest', -'UploadModelResponse', -'UserActionReference', -'Value', -'VizierServiceClient', -'WorkerPoolSpec', -'WriteTensorboardExperimentDataRequest', -'WriteTensorboardExperimentDataResponse', -'WriteTensorboardRunDataRequest', -'WriteTensorboardRunDataResponse', -'XraiAttribution', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/gapic_metadata.json deleted file mode 100644 index b584f16b81..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/gapic_metadata.json +++ /dev/null @@ -1,2059 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.aiplatform_v1beta1", - "protoPackage": "google.cloud.aiplatform.v1beta1", - "schema": "1.0", - "services": { - "DatasetService": { - "clients": { - "grpc": { - "libraryClient": "DatasetServiceClient", - "rpcs": { - "CreateDataset": { - "methods": [ - "create_dataset" - ] - }, - "DeleteDataset": { - "methods": [ - "delete_dataset" - ] - }, - "ExportData": { - "methods": [ - "export_data" - ] - }, - "GetAnnotationSpec": { - "methods": [ - "get_annotation_spec" - ] - }, - "GetDataset": { - "methods": [ - "get_dataset" - ] - }, - "ImportData": { - 
"methods": [ - "import_data" - ] - }, - "ListAnnotations": { - "methods": [ - "list_annotations" - ] - }, - "ListDataItems": { - "methods": [ - "list_data_items" - ] - }, - "ListDatasets": { - "methods": [ - "list_datasets" - ] - }, - "UpdateDataset": { - "methods": [ - "update_dataset" - ] - } - } - }, - "grpc-async": { - "libraryClient": "DatasetServiceAsyncClient", - "rpcs": { - "CreateDataset": { - "methods": [ - "create_dataset" - ] - }, - "DeleteDataset": { - "methods": [ - "delete_dataset" - ] - }, - "ExportData": { - "methods": [ - "export_data" - ] - }, - "GetAnnotationSpec": { - "methods": [ - "get_annotation_spec" - ] - }, - "GetDataset": { - "methods": [ - "get_dataset" - ] - }, - "ImportData": { - "methods": [ - "import_data" - ] - }, - "ListAnnotations": { - "methods": [ - "list_annotations" - ] - }, - "ListDataItems": { - "methods": [ - "list_data_items" - ] - }, - "ListDatasets": { - "methods": [ - "list_datasets" - ] - }, - "UpdateDataset": { - "methods": [ - "update_dataset" - ] - } - } - } - } - }, - "EndpointService": { - "clients": { - "grpc": { - "libraryClient": "EndpointServiceClient", - "rpcs": { - "CreateEndpoint": { - "methods": [ - "create_endpoint" - ] - }, - "DeleteEndpoint": { - "methods": [ - "delete_endpoint" - ] - }, - "DeployModel": { - "methods": [ - "deploy_model" - ] - }, - "GetEndpoint": { - "methods": [ - "get_endpoint" - ] - }, - "ListEndpoints": { - "methods": [ - "list_endpoints" - ] - }, - "UndeployModel": { - "methods": [ - "undeploy_model" - ] - }, - "UpdateEndpoint": { - "methods": [ - "update_endpoint" - ] - } - } - }, - "grpc-async": { - "libraryClient": "EndpointServiceAsyncClient", - "rpcs": { - "CreateEndpoint": { - "methods": [ - "create_endpoint" - ] - }, - "DeleteEndpoint": { - "methods": [ - "delete_endpoint" - ] - }, - "DeployModel": { - "methods": [ - "deploy_model" - ] - }, - "GetEndpoint": { - "methods": [ - "get_endpoint" - ] - }, - "ListEndpoints": { - "methods": [ - "list_endpoints" - ] - }, - 
"UndeployModel": { - "methods": [ - "undeploy_model" - ] - }, - "UpdateEndpoint": { - "methods": [ - "update_endpoint" - ] - } - } - } - } - }, - "FeaturestoreOnlineServingService": { - "clients": { - "grpc": { - "libraryClient": "FeaturestoreOnlineServingServiceClient", - "rpcs": { - "ReadFeatureValues": { - "methods": [ - "read_feature_values" - ] - }, - "StreamingReadFeatureValues": { - "methods": [ - "streaming_read_feature_values" - ] - } - } - }, - "grpc-async": { - "libraryClient": "FeaturestoreOnlineServingServiceAsyncClient", - "rpcs": { - "ReadFeatureValues": { - "methods": [ - "read_feature_values" - ] - }, - "StreamingReadFeatureValues": { - "methods": [ - "streaming_read_feature_values" - ] - } - } - } - } - }, - "FeaturestoreService": { - "clients": { - "grpc": { - "libraryClient": "FeaturestoreServiceClient", - "rpcs": { - "BatchCreateFeatures": { - "methods": [ - "batch_create_features" - ] - }, - "BatchReadFeatureValues": { - "methods": [ - "batch_read_feature_values" - ] - }, - "CreateEntityType": { - "methods": [ - "create_entity_type" - ] - }, - "CreateFeature": { - "methods": [ - "create_feature" - ] - }, - "CreateFeaturestore": { - "methods": [ - "create_featurestore" - ] - }, - "DeleteEntityType": { - "methods": [ - "delete_entity_type" - ] - }, - "DeleteFeature": { - "methods": [ - "delete_feature" - ] - }, - "DeleteFeaturestore": { - "methods": [ - "delete_featurestore" - ] - }, - "ExportFeatureValues": { - "methods": [ - "export_feature_values" - ] - }, - "GetEntityType": { - "methods": [ - "get_entity_type" - ] - }, - "GetFeature": { - "methods": [ - "get_feature" - ] - }, - "GetFeaturestore": { - "methods": [ - "get_featurestore" - ] - }, - "ImportFeatureValues": { - "methods": [ - "import_feature_values" - ] - }, - "ListEntityTypes": { - "methods": [ - "list_entity_types" - ] - }, - "ListFeatures": { - "methods": [ - "list_features" - ] - }, - "ListFeaturestores": { - "methods": [ - "list_featurestores" - ] - }, - "SearchFeatures": { - 
"methods": [ - "search_features" - ] - }, - "UpdateEntityType": { - "methods": [ - "update_entity_type" - ] - }, - "UpdateFeature": { - "methods": [ - "update_feature" - ] - }, - "UpdateFeaturestore": { - "methods": [ - "update_featurestore" - ] - } - } - }, - "grpc-async": { - "libraryClient": "FeaturestoreServiceAsyncClient", - "rpcs": { - "BatchCreateFeatures": { - "methods": [ - "batch_create_features" - ] - }, - "BatchReadFeatureValues": { - "methods": [ - "batch_read_feature_values" - ] - }, - "CreateEntityType": { - "methods": [ - "create_entity_type" - ] - }, - "CreateFeature": { - "methods": [ - "create_feature" - ] - }, - "CreateFeaturestore": { - "methods": [ - "create_featurestore" - ] - }, - "DeleteEntityType": { - "methods": [ - "delete_entity_type" - ] - }, - "DeleteFeature": { - "methods": [ - "delete_feature" - ] - }, - "DeleteFeaturestore": { - "methods": [ - "delete_featurestore" - ] - }, - "ExportFeatureValues": { - "methods": [ - "export_feature_values" - ] - }, - "GetEntityType": { - "methods": [ - "get_entity_type" - ] - }, - "GetFeature": { - "methods": [ - "get_feature" - ] - }, - "GetFeaturestore": { - "methods": [ - "get_featurestore" - ] - }, - "ImportFeatureValues": { - "methods": [ - "import_feature_values" - ] - }, - "ListEntityTypes": { - "methods": [ - "list_entity_types" - ] - }, - "ListFeatures": { - "methods": [ - "list_features" - ] - }, - "ListFeaturestores": { - "methods": [ - "list_featurestores" - ] - }, - "SearchFeatures": { - "methods": [ - "search_features" - ] - }, - "UpdateEntityType": { - "methods": [ - "update_entity_type" - ] - }, - "UpdateFeature": { - "methods": [ - "update_feature" - ] - }, - "UpdateFeaturestore": { - "methods": [ - "update_featurestore" - ] - } - } - } - } - }, - "IndexEndpointService": { - "clients": { - "grpc": { - "libraryClient": "IndexEndpointServiceClient", - "rpcs": { - "CreateIndexEndpoint": { - "methods": [ - "create_index_endpoint" - ] - }, - "DeleteIndexEndpoint": { - "methods": [ - 
"delete_index_endpoint" - ] - }, - "DeployIndex": { - "methods": [ - "deploy_index" - ] - }, - "GetIndexEndpoint": { - "methods": [ - "get_index_endpoint" - ] - }, - "ListIndexEndpoints": { - "methods": [ - "list_index_endpoints" - ] - }, - "MutateDeployedIndex": { - "methods": [ - "mutate_deployed_index" - ] - }, - "UndeployIndex": { - "methods": [ - "undeploy_index" - ] - }, - "UpdateIndexEndpoint": { - "methods": [ - "update_index_endpoint" - ] - } - } - }, - "grpc-async": { - "libraryClient": "IndexEndpointServiceAsyncClient", - "rpcs": { - "CreateIndexEndpoint": { - "methods": [ - "create_index_endpoint" - ] - }, - "DeleteIndexEndpoint": { - "methods": [ - "delete_index_endpoint" - ] - }, - "DeployIndex": { - "methods": [ - "deploy_index" - ] - }, - "GetIndexEndpoint": { - "methods": [ - "get_index_endpoint" - ] - }, - "ListIndexEndpoints": { - "methods": [ - "list_index_endpoints" - ] - }, - "MutateDeployedIndex": { - "methods": [ - "mutate_deployed_index" - ] - }, - "UndeployIndex": { - "methods": [ - "undeploy_index" - ] - }, - "UpdateIndexEndpoint": { - "methods": [ - "update_index_endpoint" - ] - } - } - } - } - }, - "IndexService": { - "clients": { - "grpc": { - "libraryClient": "IndexServiceClient", - "rpcs": { - "CreateIndex": { - "methods": [ - "create_index" - ] - }, - "DeleteIndex": { - "methods": [ - "delete_index" - ] - }, - "GetIndex": { - "methods": [ - "get_index" - ] - }, - "ListIndexes": { - "methods": [ - "list_indexes" - ] - }, - "UpdateIndex": { - "methods": [ - "update_index" - ] - } - } - }, - "grpc-async": { - "libraryClient": "IndexServiceAsyncClient", - "rpcs": { - "CreateIndex": { - "methods": [ - "create_index" - ] - }, - "DeleteIndex": { - "methods": [ - "delete_index" - ] - }, - "GetIndex": { - "methods": [ - "get_index" - ] - }, - "ListIndexes": { - "methods": [ - "list_indexes" - ] - }, - "UpdateIndex": { - "methods": [ - "update_index" - ] - } - } - } - } - }, - "JobService": { - "clients": { - "grpc": { - "libraryClient": 
"JobServiceClient", - "rpcs": { - "CancelBatchPredictionJob": { - "methods": [ - "cancel_batch_prediction_job" - ] - }, - "CancelCustomJob": { - "methods": [ - "cancel_custom_job" - ] - }, - "CancelDataLabelingJob": { - "methods": [ - "cancel_data_labeling_job" - ] - }, - "CancelHyperparameterTuningJob": { - "methods": [ - "cancel_hyperparameter_tuning_job" - ] - }, - "CreateBatchPredictionJob": { - "methods": [ - "create_batch_prediction_job" - ] - }, - "CreateCustomJob": { - "methods": [ - "create_custom_job" - ] - }, - "CreateDataLabelingJob": { - "methods": [ - "create_data_labeling_job" - ] - }, - "CreateHyperparameterTuningJob": { - "methods": [ - "create_hyperparameter_tuning_job" - ] - }, - "CreateModelDeploymentMonitoringJob": { - "methods": [ - "create_model_deployment_monitoring_job" - ] - }, - "DeleteBatchPredictionJob": { - "methods": [ - "delete_batch_prediction_job" - ] - }, - "DeleteCustomJob": { - "methods": [ - "delete_custom_job" - ] - }, - "DeleteDataLabelingJob": { - "methods": [ - "delete_data_labeling_job" - ] - }, - "DeleteHyperparameterTuningJob": { - "methods": [ - "delete_hyperparameter_tuning_job" - ] - }, - "DeleteModelDeploymentMonitoringJob": { - "methods": [ - "delete_model_deployment_monitoring_job" - ] - }, - "GetBatchPredictionJob": { - "methods": [ - "get_batch_prediction_job" - ] - }, - "GetCustomJob": { - "methods": [ - "get_custom_job" - ] - }, - "GetDataLabelingJob": { - "methods": [ - "get_data_labeling_job" - ] - }, - "GetHyperparameterTuningJob": { - "methods": [ - "get_hyperparameter_tuning_job" - ] - }, - "GetModelDeploymentMonitoringJob": { - "methods": [ - "get_model_deployment_monitoring_job" - ] - }, - "ListBatchPredictionJobs": { - "methods": [ - "list_batch_prediction_jobs" - ] - }, - "ListCustomJobs": { - "methods": [ - "list_custom_jobs" - ] - }, - "ListDataLabelingJobs": { - "methods": [ - "list_data_labeling_jobs" - ] - }, - "ListHyperparameterTuningJobs": { - "methods": [ - "list_hyperparameter_tuning_jobs" - 
] - }, - "ListModelDeploymentMonitoringJobs": { - "methods": [ - "list_model_deployment_monitoring_jobs" - ] - }, - "PauseModelDeploymentMonitoringJob": { - "methods": [ - "pause_model_deployment_monitoring_job" - ] - }, - "ResumeModelDeploymentMonitoringJob": { - "methods": [ - "resume_model_deployment_monitoring_job" - ] - }, - "SearchModelDeploymentMonitoringStatsAnomalies": { - "methods": [ - "search_model_deployment_monitoring_stats_anomalies" - ] - }, - "UpdateModelDeploymentMonitoringJob": { - "methods": [ - "update_model_deployment_monitoring_job" - ] - } - } - }, - "grpc-async": { - "libraryClient": "JobServiceAsyncClient", - "rpcs": { - "CancelBatchPredictionJob": { - "methods": [ - "cancel_batch_prediction_job" - ] - }, - "CancelCustomJob": { - "methods": [ - "cancel_custom_job" - ] - }, - "CancelDataLabelingJob": { - "methods": [ - "cancel_data_labeling_job" - ] - }, - "CancelHyperparameterTuningJob": { - "methods": [ - "cancel_hyperparameter_tuning_job" - ] - }, - "CreateBatchPredictionJob": { - "methods": [ - "create_batch_prediction_job" - ] - }, - "CreateCustomJob": { - "methods": [ - "create_custom_job" - ] - }, - "CreateDataLabelingJob": { - "methods": [ - "create_data_labeling_job" - ] - }, - "CreateHyperparameterTuningJob": { - "methods": [ - "create_hyperparameter_tuning_job" - ] - }, - "CreateModelDeploymentMonitoringJob": { - "methods": [ - "create_model_deployment_monitoring_job" - ] - }, - "DeleteBatchPredictionJob": { - "methods": [ - "delete_batch_prediction_job" - ] - }, - "DeleteCustomJob": { - "methods": [ - "delete_custom_job" - ] - }, - "DeleteDataLabelingJob": { - "methods": [ - "delete_data_labeling_job" - ] - }, - "DeleteHyperparameterTuningJob": { - "methods": [ - "delete_hyperparameter_tuning_job" - ] - }, - "DeleteModelDeploymentMonitoringJob": { - "methods": [ - "delete_model_deployment_monitoring_job" - ] - }, - "GetBatchPredictionJob": { - "methods": [ - "get_batch_prediction_job" - ] - }, - "GetCustomJob": { - "methods": [ 
- "get_custom_job" - ] - }, - "GetDataLabelingJob": { - "methods": [ - "get_data_labeling_job" - ] - }, - "GetHyperparameterTuningJob": { - "methods": [ - "get_hyperparameter_tuning_job" - ] - }, - "GetModelDeploymentMonitoringJob": { - "methods": [ - "get_model_deployment_monitoring_job" - ] - }, - "ListBatchPredictionJobs": { - "methods": [ - "list_batch_prediction_jobs" - ] - }, - "ListCustomJobs": { - "methods": [ - "list_custom_jobs" - ] - }, - "ListDataLabelingJobs": { - "methods": [ - "list_data_labeling_jobs" - ] - }, - "ListHyperparameterTuningJobs": { - "methods": [ - "list_hyperparameter_tuning_jobs" - ] - }, - "ListModelDeploymentMonitoringJobs": { - "methods": [ - "list_model_deployment_monitoring_jobs" - ] - }, - "PauseModelDeploymentMonitoringJob": { - "methods": [ - "pause_model_deployment_monitoring_job" - ] - }, - "ResumeModelDeploymentMonitoringJob": { - "methods": [ - "resume_model_deployment_monitoring_job" - ] - }, - "SearchModelDeploymentMonitoringStatsAnomalies": { - "methods": [ - "search_model_deployment_monitoring_stats_anomalies" - ] - }, - "UpdateModelDeploymentMonitoringJob": { - "methods": [ - "update_model_deployment_monitoring_job" - ] - } - } - } - } - }, - "MetadataService": { - "clients": { - "grpc": { - "libraryClient": "MetadataServiceClient", - "rpcs": { - "AddContextArtifactsAndExecutions": { - "methods": [ - "add_context_artifacts_and_executions" - ] - }, - "AddContextChildren": { - "methods": [ - "add_context_children" - ] - }, - "AddExecutionEvents": { - "methods": [ - "add_execution_events" - ] - }, - "CreateArtifact": { - "methods": [ - "create_artifact" - ] - }, - "CreateContext": { - "methods": [ - "create_context" - ] - }, - "CreateExecution": { - "methods": [ - "create_execution" - ] - }, - "CreateMetadataSchema": { - "methods": [ - "create_metadata_schema" - ] - }, - "CreateMetadataStore": { - "methods": [ - "create_metadata_store" - ] - }, - "DeleteArtifact": { - "methods": [ - "delete_artifact" - ] - }, - 
"DeleteContext": { - "methods": [ - "delete_context" - ] - }, - "DeleteExecution": { - "methods": [ - "delete_execution" - ] - }, - "DeleteMetadataStore": { - "methods": [ - "delete_metadata_store" - ] - }, - "GetArtifact": { - "methods": [ - "get_artifact" - ] - }, - "GetContext": { - "methods": [ - "get_context" - ] - }, - "GetExecution": { - "methods": [ - "get_execution" - ] - }, - "GetMetadataSchema": { - "methods": [ - "get_metadata_schema" - ] - }, - "GetMetadataStore": { - "methods": [ - "get_metadata_store" - ] - }, - "ListArtifacts": { - "methods": [ - "list_artifacts" - ] - }, - "ListContexts": { - "methods": [ - "list_contexts" - ] - }, - "ListExecutions": { - "methods": [ - "list_executions" - ] - }, - "ListMetadataSchemas": { - "methods": [ - "list_metadata_schemas" - ] - }, - "ListMetadataStores": { - "methods": [ - "list_metadata_stores" - ] - }, - "PurgeArtifacts": { - "methods": [ - "purge_artifacts" - ] - }, - "PurgeContexts": { - "methods": [ - "purge_contexts" - ] - }, - "PurgeExecutions": { - "methods": [ - "purge_executions" - ] - }, - "QueryArtifactLineageSubgraph": { - "methods": [ - "query_artifact_lineage_subgraph" - ] - }, - "QueryContextLineageSubgraph": { - "methods": [ - "query_context_lineage_subgraph" - ] - }, - "QueryExecutionInputsAndOutputs": { - "methods": [ - "query_execution_inputs_and_outputs" - ] - }, - "UpdateArtifact": { - "methods": [ - "update_artifact" - ] - }, - "UpdateContext": { - "methods": [ - "update_context" - ] - }, - "UpdateExecution": { - "methods": [ - "update_execution" - ] - } - } - }, - "grpc-async": { - "libraryClient": "MetadataServiceAsyncClient", - "rpcs": { - "AddContextArtifactsAndExecutions": { - "methods": [ - "add_context_artifacts_and_executions" - ] - }, - "AddContextChildren": { - "methods": [ - "add_context_children" - ] - }, - "AddExecutionEvents": { - "methods": [ - "add_execution_events" - ] - }, - "CreateArtifact": { - "methods": [ - "create_artifact" - ] - }, - "CreateContext": { - 
"methods": [ - "create_context" - ] - }, - "CreateExecution": { - "methods": [ - "create_execution" - ] - }, - "CreateMetadataSchema": { - "methods": [ - "create_metadata_schema" - ] - }, - "CreateMetadataStore": { - "methods": [ - "create_metadata_store" - ] - }, - "DeleteArtifact": { - "methods": [ - "delete_artifact" - ] - }, - "DeleteContext": { - "methods": [ - "delete_context" - ] - }, - "DeleteExecution": { - "methods": [ - "delete_execution" - ] - }, - "DeleteMetadataStore": { - "methods": [ - "delete_metadata_store" - ] - }, - "GetArtifact": { - "methods": [ - "get_artifact" - ] - }, - "GetContext": { - "methods": [ - "get_context" - ] - }, - "GetExecution": { - "methods": [ - "get_execution" - ] - }, - "GetMetadataSchema": { - "methods": [ - "get_metadata_schema" - ] - }, - "GetMetadataStore": { - "methods": [ - "get_metadata_store" - ] - }, - "ListArtifacts": { - "methods": [ - "list_artifacts" - ] - }, - "ListContexts": { - "methods": [ - "list_contexts" - ] - }, - "ListExecutions": { - "methods": [ - "list_executions" - ] - }, - "ListMetadataSchemas": { - "methods": [ - "list_metadata_schemas" - ] - }, - "ListMetadataStores": { - "methods": [ - "list_metadata_stores" - ] - }, - "PurgeArtifacts": { - "methods": [ - "purge_artifacts" - ] - }, - "PurgeContexts": { - "methods": [ - "purge_contexts" - ] - }, - "PurgeExecutions": { - "methods": [ - "purge_executions" - ] - }, - "QueryArtifactLineageSubgraph": { - "methods": [ - "query_artifact_lineage_subgraph" - ] - }, - "QueryContextLineageSubgraph": { - "methods": [ - "query_context_lineage_subgraph" - ] - }, - "QueryExecutionInputsAndOutputs": { - "methods": [ - "query_execution_inputs_and_outputs" - ] - }, - "UpdateArtifact": { - "methods": [ - "update_artifact" - ] - }, - "UpdateContext": { - "methods": [ - "update_context" - ] - }, - "UpdateExecution": { - "methods": [ - "update_execution" - ] - } - } - } - } - }, - "MigrationService": { - "clients": { - "grpc": { - "libraryClient": 
"MigrationServiceClient", - "rpcs": { - "BatchMigrateResources": { - "methods": [ - "batch_migrate_resources" - ] - }, - "SearchMigratableResources": { - "methods": [ - "search_migratable_resources" - ] - } - } - }, - "grpc-async": { - "libraryClient": "MigrationServiceAsyncClient", - "rpcs": { - "BatchMigrateResources": { - "methods": [ - "batch_migrate_resources" - ] - }, - "SearchMigratableResources": { - "methods": [ - "search_migratable_resources" - ] - } - } - } - } - }, - "ModelService": { - "clients": { - "grpc": { - "libraryClient": "ModelServiceClient", - "rpcs": { - "DeleteModel": { - "methods": [ - "delete_model" - ] - }, - "ExportModel": { - "methods": [ - "export_model" - ] - }, - "GetModel": { - "methods": [ - "get_model" - ] - }, - "GetModelEvaluation": { - "methods": [ - "get_model_evaluation" - ] - }, - "GetModelEvaluationSlice": { - "methods": [ - "get_model_evaluation_slice" - ] - }, - "ListModelEvaluationSlices": { - "methods": [ - "list_model_evaluation_slices" - ] - }, - "ListModelEvaluations": { - "methods": [ - "list_model_evaluations" - ] - }, - "ListModels": { - "methods": [ - "list_models" - ] - }, - "UpdateModel": { - "methods": [ - "update_model" - ] - }, - "UploadModel": { - "methods": [ - "upload_model" - ] - } - } - }, - "grpc-async": { - "libraryClient": "ModelServiceAsyncClient", - "rpcs": { - "DeleteModel": { - "methods": [ - "delete_model" - ] - }, - "ExportModel": { - "methods": [ - "export_model" - ] - }, - "GetModel": { - "methods": [ - "get_model" - ] - }, - "GetModelEvaluation": { - "methods": [ - "get_model_evaluation" - ] - }, - "GetModelEvaluationSlice": { - "methods": [ - "get_model_evaluation_slice" - ] - }, - "ListModelEvaluationSlices": { - "methods": [ - "list_model_evaluation_slices" - ] - }, - "ListModelEvaluations": { - "methods": [ - "list_model_evaluations" - ] - }, - "ListModels": { - "methods": [ - "list_models" - ] - }, - "UpdateModel": { - "methods": [ - "update_model" - ] - }, - "UploadModel": { - 
"methods": [ - "upload_model" - ] - } - } - } - } - }, - "PipelineService": { - "clients": { - "grpc": { - "libraryClient": "PipelineServiceClient", - "rpcs": { - "CancelPipelineJob": { - "methods": [ - "cancel_pipeline_job" - ] - }, - "CancelTrainingPipeline": { - "methods": [ - "cancel_training_pipeline" - ] - }, - "CreatePipelineJob": { - "methods": [ - "create_pipeline_job" - ] - }, - "CreateTrainingPipeline": { - "methods": [ - "create_training_pipeline" - ] - }, - "DeletePipelineJob": { - "methods": [ - "delete_pipeline_job" - ] - }, - "DeleteTrainingPipeline": { - "methods": [ - "delete_training_pipeline" - ] - }, - "GetPipelineJob": { - "methods": [ - "get_pipeline_job" - ] - }, - "GetTrainingPipeline": { - "methods": [ - "get_training_pipeline" - ] - }, - "ListPipelineJobs": { - "methods": [ - "list_pipeline_jobs" - ] - }, - "ListTrainingPipelines": { - "methods": [ - "list_training_pipelines" - ] - } - } - }, - "grpc-async": { - "libraryClient": "PipelineServiceAsyncClient", - "rpcs": { - "CancelPipelineJob": { - "methods": [ - "cancel_pipeline_job" - ] - }, - "CancelTrainingPipeline": { - "methods": [ - "cancel_training_pipeline" - ] - }, - "CreatePipelineJob": { - "methods": [ - "create_pipeline_job" - ] - }, - "CreateTrainingPipeline": { - "methods": [ - "create_training_pipeline" - ] - }, - "DeletePipelineJob": { - "methods": [ - "delete_pipeline_job" - ] - }, - "DeleteTrainingPipeline": { - "methods": [ - "delete_training_pipeline" - ] - }, - "GetPipelineJob": { - "methods": [ - "get_pipeline_job" - ] - }, - "GetTrainingPipeline": { - "methods": [ - "get_training_pipeline" - ] - }, - "ListPipelineJobs": { - "methods": [ - "list_pipeline_jobs" - ] - }, - "ListTrainingPipelines": { - "methods": [ - "list_training_pipelines" - ] - } - } - } - } - }, - "PredictionService": { - "clients": { - "grpc": { - "libraryClient": "PredictionServiceClient", - "rpcs": { - "Explain": { - "methods": [ - "explain" - ] - }, - "Predict": { - "methods": [ - "predict" - ] 
- }, - "RawPredict": { - "methods": [ - "raw_predict" - ] - } - } - }, - "grpc-async": { - "libraryClient": "PredictionServiceAsyncClient", - "rpcs": { - "Explain": { - "methods": [ - "explain" - ] - }, - "Predict": { - "methods": [ - "predict" - ] - }, - "RawPredict": { - "methods": [ - "raw_predict" - ] - } - } - } - } - }, - "SpecialistPoolService": { - "clients": { - "grpc": { - "libraryClient": "SpecialistPoolServiceClient", - "rpcs": { - "CreateSpecialistPool": { - "methods": [ - "create_specialist_pool" - ] - }, - "DeleteSpecialistPool": { - "methods": [ - "delete_specialist_pool" - ] - }, - "GetSpecialistPool": { - "methods": [ - "get_specialist_pool" - ] - }, - "ListSpecialistPools": { - "methods": [ - "list_specialist_pools" - ] - }, - "UpdateSpecialistPool": { - "methods": [ - "update_specialist_pool" - ] - } - } - }, - "grpc-async": { - "libraryClient": "SpecialistPoolServiceAsyncClient", - "rpcs": { - "CreateSpecialistPool": { - "methods": [ - "create_specialist_pool" - ] - }, - "DeleteSpecialistPool": { - "methods": [ - "delete_specialist_pool" - ] - }, - "GetSpecialistPool": { - "methods": [ - "get_specialist_pool" - ] - }, - "ListSpecialistPools": { - "methods": [ - "list_specialist_pools" - ] - }, - "UpdateSpecialistPool": { - "methods": [ - "update_specialist_pool" - ] - } - } - } - } - }, - "TensorboardService": { - "clients": { - "grpc": { - "libraryClient": "TensorboardServiceClient", - "rpcs": { - "BatchCreateTensorboardRuns": { - "methods": [ - "batch_create_tensorboard_runs" - ] - }, - "BatchCreateTensorboardTimeSeries": { - "methods": [ - "batch_create_tensorboard_time_series" - ] - }, - "BatchReadTensorboardTimeSeriesData": { - "methods": [ - "batch_read_tensorboard_time_series_data" - ] - }, - "CreateTensorboard": { - "methods": [ - "create_tensorboard" - ] - }, - "CreateTensorboardExperiment": { - "methods": [ - "create_tensorboard_experiment" - ] - }, - "CreateTensorboardRun": { - "methods": [ - "create_tensorboard_run" - ] - }, - 
"CreateTensorboardTimeSeries": { - "methods": [ - "create_tensorboard_time_series" - ] - }, - "DeleteTensorboard": { - "methods": [ - "delete_tensorboard" - ] - }, - "DeleteTensorboardExperiment": { - "methods": [ - "delete_tensorboard_experiment" - ] - }, - "DeleteTensorboardRun": { - "methods": [ - "delete_tensorboard_run" - ] - }, - "DeleteTensorboardTimeSeries": { - "methods": [ - "delete_tensorboard_time_series" - ] - }, - "ExportTensorboardTimeSeriesData": { - "methods": [ - "export_tensorboard_time_series_data" - ] - }, - "GetTensorboard": { - "methods": [ - "get_tensorboard" - ] - }, - "GetTensorboardExperiment": { - "methods": [ - "get_tensorboard_experiment" - ] - }, - "GetTensorboardRun": { - "methods": [ - "get_tensorboard_run" - ] - }, - "GetTensorboardTimeSeries": { - "methods": [ - "get_tensorboard_time_series" - ] - }, - "ListTensorboardExperiments": { - "methods": [ - "list_tensorboard_experiments" - ] - }, - "ListTensorboardRuns": { - "methods": [ - "list_tensorboard_runs" - ] - }, - "ListTensorboardTimeSeries": { - "methods": [ - "list_tensorboard_time_series" - ] - }, - "ListTensorboards": { - "methods": [ - "list_tensorboards" - ] - }, - "ReadTensorboardBlobData": { - "methods": [ - "read_tensorboard_blob_data" - ] - }, - "ReadTensorboardTimeSeriesData": { - "methods": [ - "read_tensorboard_time_series_data" - ] - }, - "UpdateTensorboard": { - "methods": [ - "update_tensorboard" - ] - }, - "UpdateTensorboardExperiment": { - "methods": [ - "update_tensorboard_experiment" - ] - }, - "UpdateTensorboardRun": { - "methods": [ - "update_tensorboard_run" - ] - }, - "UpdateTensorboardTimeSeries": { - "methods": [ - "update_tensorboard_time_series" - ] - }, - "WriteTensorboardExperimentData": { - "methods": [ - "write_tensorboard_experiment_data" - ] - }, - "WriteTensorboardRunData": { - "methods": [ - "write_tensorboard_run_data" - ] - } - } - }, - "grpc-async": { - "libraryClient": "TensorboardServiceAsyncClient", - "rpcs": { - 
"BatchCreateTensorboardRuns": { - "methods": [ - "batch_create_tensorboard_runs" - ] - }, - "BatchCreateTensorboardTimeSeries": { - "methods": [ - "batch_create_tensorboard_time_series" - ] - }, - "BatchReadTensorboardTimeSeriesData": { - "methods": [ - "batch_read_tensorboard_time_series_data" - ] - }, - "CreateTensorboard": { - "methods": [ - "create_tensorboard" - ] - }, - "CreateTensorboardExperiment": { - "methods": [ - "create_tensorboard_experiment" - ] - }, - "CreateTensorboardRun": { - "methods": [ - "create_tensorboard_run" - ] - }, - "CreateTensorboardTimeSeries": { - "methods": [ - "create_tensorboard_time_series" - ] - }, - "DeleteTensorboard": { - "methods": [ - "delete_tensorboard" - ] - }, - "DeleteTensorboardExperiment": { - "methods": [ - "delete_tensorboard_experiment" - ] - }, - "DeleteTensorboardRun": { - "methods": [ - "delete_tensorboard_run" - ] - }, - "DeleteTensorboardTimeSeries": { - "methods": [ - "delete_tensorboard_time_series" - ] - }, - "ExportTensorboardTimeSeriesData": { - "methods": [ - "export_tensorboard_time_series_data" - ] - }, - "GetTensorboard": { - "methods": [ - "get_tensorboard" - ] - }, - "GetTensorboardExperiment": { - "methods": [ - "get_tensorboard_experiment" - ] - }, - "GetTensorboardRun": { - "methods": [ - "get_tensorboard_run" - ] - }, - "GetTensorboardTimeSeries": { - "methods": [ - "get_tensorboard_time_series" - ] - }, - "ListTensorboardExperiments": { - "methods": [ - "list_tensorboard_experiments" - ] - }, - "ListTensorboardRuns": { - "methods": [ - "list_tensorboard_runs" - ] - }, - "ListTensorboardTimeSeries": { - "methods": [ - "list_tensorboard_time_series" - ] - }, - "ListTensorboards": { - "methods": [ - "list_tensorboards" - ] - }, - "ReadTensorboardBlobData": { - "methods": [ - "read_tensorboard_blob_data" - ] - }, - "ReadTensorboardTimeSeriesData": { - "methods": [ - "read_tensorboard_time_series_data" - ] - }, - "UpdateTensorboard": { - "methods": [ - "update_tensorboard" - ] - }, - 
"UpdateTensorboardExperiment": { - "methods": [ - "update_tensorboard_experiment" - ] - }, - "UpdateTensorboardRun": { - "methods": [ - "update_tensorboard_run" - ] - }, - "UpdateTensorboardTimeSeries": { - "methods": [ - "update_tensorboard_time_series" - ] - }, - "WriteTensorboardExperimentData": { - "methods": [ - "write_tensorboard_experiment_data" - ] - }, - "WriteTensorboardRunData": { - "methods": [ - "write_tensorboard_run_data" - ] - } - } - } - } - }, - "VizierService": { - "clients": { - "grpc": { - "libraryClient": "VizierServiceClient", - "rpcs": { - "AddTrialMeasurement": { - "methods": [ - "add_trial_measurement" - ] - }, - "CheckTrialEarlyStoppingState": { - "methods": [ - "check_trial_early_stopping_state" - ] - }, - "CompleteTrial": { - "methods": [ - "complete_trial" - ] - }, - "CreateStudy": { - "methods": [ - "create_study" - ] - }, - "CreateTrial": { - "methods": [ - "create_trial" - ] - }, - "DeleteStudy": { - "methods": [ - "delete_study" - ] - }, - "DeleteTrial": { - "methods": [ - "delete_trial" - ] - }, - "GetStudy": { - "methods": [ - "get_study" - ] - }, - "GetTrial": { - "methods": [ - "get_trial" - ] - }, - "ListOptimalTrials": { - "methods": [ - "list_optimal_trials" - ] - }, - "ListStudies": { - "methods": [ - "list_studies" - ] - }, - "ListTrials": { - "methods": [ - "list_trials" - ] - }, - "LookupStudy": { - "methods": [ - "lookup_study" - ] - }, - "StopTrial": { - "methods": [ - "stop_trial" - ] - }, - "SuggestTrials": { - "methods": [ - "suggest_trials" - ] - } - } - }, - "grpc-async": { - "libraryClient": "VizierServiceAsyncClient", - "rpcs": { - "AddTrialMeasurement": { - "methods": [ - "add_trial_measurement" - ] - }, - "CheckTrialEarlyStoppingState": { - "methods": [ - "check_trial_early_stopping_state" - ] - }, - "CompleteTrial": { - "methods": [ - "complete_trial" - ] - }, - "CreateStudy": { - "methods": [ - "create_study" - ] - }, - "CreateTrial": { - "methods": [ - "create_trial" - ] - }, - "DeleteStudy": { - "methods": 
[ - "delete_study" - ] - }, - "DeleteTrial": { - "methods": [ - "delete_trial" - ] - }, - "GetStudy": { - "methods": [ - "get_study" - ] - }, - "GetTrial": { - "methods": [ - "get_trial" - ] - }, - "ListOptimalTrials": { - "methods": [ - "list_optimal_trials" - ] - }, - "ListStudies": { - "methods": [ - "list_studies" - ] - }, - "ListTrials": { - "methods": [ - "list_trials" - ] - }, - "LookupStudy": { - "methods": [ - "lookup_study" - ] - }, - "StopTrial": { - "methods": [ - "stop_trial" - ] - }, - "SuggestTrials": { - "methods": [ - "suggest_trials" - ] - } - } - } - } - } - } -} diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/py.typed b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/py.typed deleted file mode 100644 index 228f1c51c6..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-aiplatform package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/__init__.py deleted file mode 100644 index 4de65971c2..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/__init__.py deleted file mode 100644 index 44e8fb2115..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import DatasetServiceClient -from .async_client import DatasetServiceAsyncClient - -__all__ = ( - 'DatasetServiceClient', - 'DatasetServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py deleted file mode 100644 index bd864f01f3..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/async_client.py +++ /dev/null @@ -1,1083 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.dataset_service import pagers -from google.cloud.aiplatform_v1beta1.types import annotation -from google.cloud.aiplatform_v1beta1.types import annotation_spec -from google.cloud.aiplatform_v1beta1.types import data_item -from google.cloud.aiplatform_v1beta1.types import dataset -from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset -from google.cloud.aiplatform_v1beta1.types import dataset_service -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from 
google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import DatasetServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import DatasetServiceGrpcAsyncIOTransport -from .client import DatasetServiceClient - - -class DatasetServiceAsyncClient: - """The service that handles the CRUD of Vertex AI Dataset and - its child resources. - """ - - _client: DatasetServiceClient - - DEFAULT_ENDPOINT = DatasetServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = DatasetServiceClient.DEFAULT_MTLS_ENDPOINT - - annotation_path = staticmethod(DatasetServiceClient.annotation_path) - parse_annotation_path = staticmethod(DatasetServiceClient.parse_annotation_path) - annotation_spec_path = staticmethod(DatasetServiceClient.annotation_spec_path) - parse_annotation_spec_path = staticmethod(DatasetServiceClient.parse_annotation_spec_path) - data_item_path = staticmethod(DatasetServiceClient.data_item_path) - parse_data_item_path = staticmethod(DatasetServiceClient.parse_data_item_path) - dataset_path = staticmethod(DatasetServiceClient.dataset_path) - parse_dataset_path = staticmethod(DatasetServiceClient.parse_dataset_path) - common_billing_account_path = staticmethod(DatasetServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(DatasetServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(DatasetServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(DatasetServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(DatasetServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(DatasetServiceClient.parse_common_organization_path) - common_project_path = staticmethod(DatasetServiceClient.common_project_path) - parse_common_project_path = staticmethod(DatasetServiceClient.parse_common_project_path) - common_location_path = staticmethod(DatasetServiceClient.common_location_path) - parse_common_location_path = 
staticmethod(DatasetServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - DatasetServiceAsyncClient: The constructed client. - """ - return DatasetServiceClient.from_service_account_info.__func__(DatasetServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - DatasetServiceAsyncClient: The constructed client. - """ - return DatasetServiceClient.from_service_account_file.__func__(DatasetServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> DatasetServiceTransport: - """Returns the transport used by the client instance. - - Returns: - DatasetServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(DatasetServiceClient).get_transport_class, type(DatasetServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, DatasetServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the dataset service client. 
- - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.DatasetServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. 
- """ - self._client = DatasetServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_dataset(self, - request: Union[dataset_service.CreateDatasetRequest, dict] = None, - *, - parent: str = None, - dataset: gca_dataset.Dataset = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a Dataset. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateDatasetRequest, dict]): - The request object. Request message for - [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset]. - parent (:class:`str`): - Required. The resource name of the Location to create - the Dataset in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - dataset (:class:`google.cloud.aiplatform_v1beta1.types.Dataset`): - Required. The Dataset to create. - This corresponds to the ``dataset`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.Dataset` A - collection of DataItems and Annotations on them. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, dataset]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.CreateDatasetRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if dataset is not None: - request.dataset = dataset - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_dataset, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_dataset.Dataset, - metadata_type=dataset_service.CreateDatasetOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_dataset(self, - request: Union[dataset_service.GetDatasetRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> dataset.Dataset: - r"""Gets a Dataset. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetDatasetRequest, dict]): - The request object. Request message for - [DatasetService.GetDataset][google.cloud.aiplatform.v1beta1.DatasetService.GetDataset]. - name (:class:`str`): - Required. The name of the Dataset - resource. 
- - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Dataset: - A collection of DataItems and - Annotations on them. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.GetDatasetRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_dataset, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def update_dataset(self, - request: Union[dataset_service.UpdateDatasetRequest, dict] = None, - *, - dataset: gca_dataset.Dataset = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_dataset.Dataset: - r"""Updates a Dataset. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateDatasetRequest, dict]): - The request object. Request message for - [DatasetService.UpdateDataset][google.cloud.aiplatform.v1beta1.DatasetService.UpdateDataset]. - dataset (:class:`google.cloud.aiplatform_v1beta1.types.Dataset`): - Required. The Dataset which replaces - the resource on the server. - - This corresponds to the ``dataset`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The update mask applies to the resource. For - the ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - Updatable fields: - - - ``display_name`` - - ``description`` - - ``labels`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Dataset: - A collection of DataItems and - Annotations on them. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([dataset, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.UpdateDatasetRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if dataset is not None: - request.dataset = dataset - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_dataset, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("dataset.name", request.dataset.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_datasets(self, - request: Union[dataset_service.ListDatasetsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDatasetsAsyncPager: - r"""Lists Datasets in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListDatasetsRequest, dict]): - The request object. Request message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. - parent (:class:`str`): - Required. The name of the Dataset's parent resource. - Format: ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDatasetsAsyncPager: - Response message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.ListDatasetsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_datasets, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListDatasetsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def delete_dataset(self, - request: Union[dataset_service.DeleteDatasetRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a Dataset. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteDatasetRequest, dict]): - The request object. Request message for - [DatasetService.DeleteDataset][google.cloud.aiplatform.v1beta1.DatasetService.DeleteDataset]. - name (:class:`str`): - Required. The resource name of the Dataset to delete. - Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.DeleteDatasetRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_dataset, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def import_data(self, - request: Union[dataset_service.ImportDataRequest, dict] = None, - *, - name: str = None, - import_configs: Sequence[dataset.ImportDataConfig] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Imports data into a Dataset. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ImportDataRequest, dict]): - The request object. Request message for - [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. - name (:class:`str`): - Required. The name of the Dataset resource. 
Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - import_configs (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.ImportDataConfig]`): - Required. The desired input - locations. The contents of all input - locations will be imported in one batch. - - This corresponds to the ``import_configs`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.ImportDataResponse` - Response message for - [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, import_configs]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.ImportDataRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if import_configs: - request.import_configs.extend(import_configs) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.import_data, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - dataset_service.ImportDataResponse, - metadata_type=dataset_service.ImportDataOperationMetadata, - ) - - # Done; return the response. - return response - - async def export_data(self, - request: Union[dataset_service.ExportDataRequest, dict] = None, - *, - name: str = None, - export_config: dataset.ExportDataConfig = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Exports data from a Dataset. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ExportDataRequest, dict]): - The request object. Request message for - [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. - name (:class:`str`): - Required. The name of the Dataset resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - export_config (:class:`google.cloud.aiplatform_v1beta1.types.ExportDataConfig`): - Required. The desired output - location. - - This corresponds to the ``export_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.ExportDataResponse` - Response message for - [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, export_config]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.ExportDataRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if export_config is not None: - request.export_config = export_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.export_data, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
- response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - dataset_service.ExportDataResponse, - metadata_type=dataset_service.ExportDataOperationMetadata, - ) - - # Done; return the response. - return response - - async def list_data_items(self, - request: Union[dataset_service.ListDataItemsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDataItemsAsyncPager: - r"""Lists DataItems in a Dataset. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListDataItemsRequest, dict]): - The request object. Request message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. - parent (:class:`str`): - Required. The resource name of the Dataset to list - DataItems from. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDataItemsAsyncPager: - Response message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.ListDataItemsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_data_items, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListDataItemsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_annotation_spec(self, - request: Union[dataset_service.GetAnnotationSpecRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> annotation_spec.AnnotationSpec: - r"""Gets an AnnotationSpec. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetAnnotationSpecRequest, dict]): - The request object. Request message for - [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1beta1.DatasetService.GetAnnotationSpec]. - name (:class:`str`): - Required. The name of the AnnotationSpec resource. 
- Format: - ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.AnnotationSpec: - Identifies a concept with which - DataItems may be annotated with. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.GetAnnotationSpecRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_annotation_spec, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def list_annotations(self, - request: Union[dataset_service.ListAnnotationsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListAnnotationsAsyncPager: - r"""Lists Annotations belongs to a dataitem - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListAnnotationsRequest, dict]): - The request object. Request message for - [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. - parent (:class:`str`): - Required. The resource name of the DataItem to list - Annotations from. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListAnnotationsAsyncPager: - Response message for - [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = dataset_service.ListAnnotationsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_annotations, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListAnnotationsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "DatasetServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py deleted file mode 100644 index 1cfbec1cd6..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/client.py +++ /dev/null @@ -1,1308 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.dataset_service import pagers -from google.cloud.aiplatform_v1beta1.types import annotation -from google.cloud.aiplatform_v1beta1.types import annotation_spec -from google.cloud.aiplatform_v1beta1.types import data_item -from google.cloud.aiplatform_v1beta1.types import dataset -from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset -from google.cloud.aiplatform_v1beta1.types import dataset_service -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import DatasetServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import DatasetServiceGrpcTransport -from 
.transports.grpc_asyncio import DatasetServiceGrpcAsyncIOTransport - - -class DatasetServiceClientMeta(type): - """Metaclass for the DatasetService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[DatasetServiceTransport]] - _transport_registry["grpc"] = DatasetServiceGrpcTransport - _transport_registry["grpc_asyncio"] = DatasetServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[DatasetServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class DatasetServiceClient(metaclass=DatasetServiceClientMeta): - """The service that handles the CRUD of Vertex AI Dataset and - its child resources. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
- ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - DatasetServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - DatasetServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> DatasetServiceTransport: - """Returns the transport used by the client instance. - - Returns: - DatasetServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def annotation_path(project: str,location: str,dataset: str,data_item: str,annotation: str,) -> str: - """Returns a fully-qualified annotation string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}/annotations/{annotation}".format(project=project, location=location, dataset=dataset, data_item=data_item, annotation=annotation, ) - - @staticmethod - def parse_annotation_path(path: str) -> Dict[str,str]: - """Parses a annotation path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/dataItems/(?P.+?)/annotations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def annotation_spec_path(project: str,location: str,dataset: str,annotation_spec: str,) -> str: - """Returns a fully-qualified annotation_spec string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(project=project, location=location, dataset=dataset, annotation_spec=annotation_spec, ) - - @staticmethod - def parse_annotation_spec_path(path: str) -> Dict[str,str]: - """Parses a annotation_spec path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/annotationSpecs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def data_item_path(project: str,location: str,dataset: str,data_item: str,) -> str: - """Returns a fully-qualified data_item string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}".format(project=project, location=location, dataset=dataset, data_item=data_item, ) - - @staticmethod - def parse_data_item_path(path: str) -> Dict[str,str]: - """Parses a data_item path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)/dataItems/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def 
dataset_path(project: str,location: str,dataset: str,) -> str: - """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - - @staticmethod - def parse_dataset_path(path: str) -> Dict[str,str]: - """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def 
parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, DatasetServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the dataset service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, DatasetServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. 
- if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, DatasetServiceTransport): - # transport is a DatasetServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def create_dataset(self, - request: Union[dataset_service.CreateDatasetRequest, dict] = None, - *, - parent: str = None, - dataset: gca_dataset.Dataset = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates a Dataset. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateDatasetRequest, dict]): - The request object. Request message for - [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset]. - parent (str): - Required. The resource name of the Location to create - the Dataset in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - dataset (google.cloud.aiplatform_v1beta1.types.Dataset): - Required. The Dataset to create. - This corresponds to the ``dataset`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
- - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.Dataset` A - collection of DataItems and Annotations on them. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, dataset]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.CreateDatasetRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.CreateDatasetRequest): - request = dataset_service.CreateDatasetRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if dataset is not None: - request.dataset = dataset - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_dataset] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_dataset.Dataset, - metadata_type=dataset_service.CreateDatasetOperationMetadata, - ) - - # Done; return the response. 
- return response - - def get_dataset(self, - request: Union[dataset_service.GetDatasetRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> dataset.Dataset: - r"""Gets a Dataset. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetDatasetRequest, dict]): - The request object. Request message for - [DatasetService.GetDataset][google.cloud.aiplatform.v1beta1.DatasetService.GetDataset]. - name (str): - Required. The name of the Dataset - resource. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Dataset: - A collection of DataItems and - Annotations on them. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.GetDatasetRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.GetDatasetRequest): - request = dataset_service.GetDatasetRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_dataset] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_dataset(self, - request: Union[dataset_service.UpdateDatasetRequest, dict] = None, - *, - dataset: gca_dataset.Dataset = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_dataset.Dataset: - r"""Updates a Dataset. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateDatasetRequest, dict]): - The request object. Request message for - [DatasetService.UpdateDataset][google.cloud.aiplatform.v1beta1.DatasetService.UpdateDataset]. - dataset (google.cloud.aiplatform_v1beta1.types.Dataset): - Required. The Dataset which replaces - the resource on the server. - - This corresponds to the ``dataset`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. For - the ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - Updatable fields: - - - ``display_name`` - - ``description`` - - ``labels`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Dataset: - A collection of DataItems and - Annotations on them. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([dataset, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.UpdateDatasetRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.UpdateDatasetRequest): - request = dataset_service.UpdateDatasetRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if dataset is not None: - request.dataset = dataset - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_dataset] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("dataset.name", request.dataset.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def list_datasets(self, - request: Union[dataset_service.ListDatasetsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDatasetsPager: - r"""Lists Datasets in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListDatasetsRequest, dict]): - The request object. Request message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. - parent (str): - Required. The name of the Dataset's parent resource. - Format: ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDatasetsPager: - Response message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.ListDatasetsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, dataset_service.ListDatasetsRequest): - request = dataset_service.ListDatasetsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_datasets] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListDatasetsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_dataset(self, - request: Union[dataset_service.DeleteDatasetRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a Dataset. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteDatasetRequest, dict]): - The request object. Request message for - [DatasetService.DeleteDataset][google.cloud.aiplatform.v1beta1.DatasetService.DeleteDataset]. - name (str): - Required. The resource name of the Dataset to delete. - Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.DeleteDatasetRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.DeleteDatasetRequest): - request = dataset_service.DeleteDatasetRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_dataset] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def import_data(self, - request: Union[dataset_service.ImportDataRequest, dict] = None, - *, - name: str = None, - import_configs: Sequence[dataset.ImportDataConfig] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Imports data into a Dataset. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ImportDataRequest, dict]): - The request object. Request message for - [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. - name (str): - Required. The name of the Dataset resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - import_configs (Sequence[google.cloud.aiplatform_v1beta1.types.ImportDataConfig]): - Required. The desired input - locations. The contents of all input - locations will be imported in one batch. - - This corresponds to the ``import_configs`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
- - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.ImportDataResponse` - Response message for - [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, import_configs]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.ImportDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.ImportDataRequest): - request = dataset_service.ImportDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if import_configs is not None: - request.import_configs = import_configs - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.import_data] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - dataset_service.ImportDataResponse, - metadata_type=dataset_service.ImportDataOperationMetadata, - ) - - # Done; return the response. 
- return response - - def export_data(self, - request: Union[dataset_service.ExportDataRequest, dict] = None, - *, - name: str = None, - export_config: dataset.ExportDataConfig = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Exports data from a Dataset. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ExportDataRequest, dict]): - The request object. Request message for - [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. - name (str): - Required. The name of the Dataset resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - export_config (google.cloud.aiplatform_v1beta1.types.ExportDataConfig): - Required. The desired output - location. - - This corresponds to the ``export_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.ExportDataResponse` - Response message for - [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name, export_config]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.ExportDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.ExportDataRequest): - request = dataset_service.ExportDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if export_config is not None: - request.export_config = export_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.export_data] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - dataset_service.ExportDataResponse, - metadata_type=dataset_service.ExportDataOperationMetadata, - ) - - # Done; return the response. - return response - - def list_data_items(self, - request: Union[dataset_service.ListDataItemsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDataItemsPager: - r"""Lists DataItems in a Dataset. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListDataItemsRequest, dict]): - The request object. 
Request message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. - parent (str): - Required. The resource name of the Dataset to list - DataItems from. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListDataItemsPager: - Response message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.ListDataItemsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.ListDataItemsRequest): - request = dataset_service.ListDataItemsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.list_data_items] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListDataItemsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_annotation_spec(self, - request: Union[dataset_service.GetAnnotationSpecRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> annotation_spec.AnnotationSpec: - r"""Gets an AnnotationSpec. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetAnnotationSpecRequest, dict]): - The request object. Request message for - [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1beta1.DatasetService.GetAnnotationSpec]. - name (str): - Required. The name of the AnnotationSpec resource. - Format: - ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.AnnotationSpec: - Identifies a concept with which - DataItems may be annotated with. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.GetAnnotationSpecRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.GetAnnotationSpecRequest): - request = dataset_service.GetAnnotationSpecRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_annotation_spec] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_annotations(self, - request: Union[dataset_service.ListAnnotationsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListAnnotationsPager: - r"""Lists Annotations belongs to a dataitem - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListAnnotationsRequest, dict]): - The request object. Request message for - [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. - parent (str): - Required. 
The resource name of the DataItem to list - Annotations from. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.dataset_service.pagers.ListAnnotationsPager: - Response message for - [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a dataset_service.ListAnnotationsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, dataset_service.ListAnnotationsRequest): - request = dataset_service.ListAnnotationsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.list_annotations] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListAnnotationsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! - """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "DatasetServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/pagers.py deleted file mode 100644 index 8fe86eec31..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/pagers.py +++ /dev/null @@ -1,387 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.aiplatform_v1beta1.types import annotation -from google.cloud.aiplatform_v1beta1.types import data_item -from google.cloud.aiplatform_v1beta1.types import dataset -from google.cloud.aiplatform_v1beta1.types import dataset_service - - -class ListDatasetsPager: - """A pager for iterating through ``list_datasets`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListDatasetsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``datasets`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListDatasets`` requests and continue to iterate - through the ``datasets`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListDatasetsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., dataset_service.ListDatasetsResponse], - request: dataset_service.ListDatasetsRequest, - response: dataset_service.ListDatasetsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListDatasetsRequest): - The initial request object. 
- response (google.cloud.aiplatform_v1beta1.types.ListDatasetsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = dataset_service.ListDatasetsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[dataset_service.ListDatasetsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[dataset.Dataset]: - for page in self.pages: - yield from page.datasets - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListDatasetsAsyncPager: - """A pager for iterating through ``list_datasets`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListDatasetsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``datasets`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListDatasets`` requests and continue to iterate - through the ``datasets`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListDatasetsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[dataset_service.ListDatasetsResponse]], - request: dataset_service.ListDatasetsRequest, - response: dataset_service.ListDatasetsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. 
- - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListDatasetsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListDatasetsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = dataset_service.ListDatasetsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[dataset_service.ListDatasetsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[dataset.Dataset]: - async def async_generator(): - async for page in self.pages: - for response in page.datasets: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListDataItemsPager: - """A pager for iterating through ``list_data_items`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListDataItemsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``data_items`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListDataItems`` requests and continue to iterate - through the ``data_items`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListDataItemsResponse` - attributes are available on the pager. 
If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., dataset_service.ListDataItemsResponse], - request: dataset_service.ListDataItemsRequest, - response: dataset_service.ListDataItemsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListDataItemsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListDataItemsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = dataset_service.ListDataItemsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[dataset_service.ListDataItemsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[data_item.DataItem]: - for page in self.pages: - yield from page.data_items - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListDataItemsAsyncPager: - """A pager for iterating through ``list_data_items`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListDataItemsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``data_items`` field. 
- - If there are more pages, the ``__aiter__`` method will make additional - ``ListDataItems`` requests and continue to iterate - through the ``data_items`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListDataItemsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[dataset_service.ListDataItemsResponse]], - request: dataset_service.ListDataItemsRequest, - response: dataset_service.ListDataItemsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListDataItemsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListDataItemsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = dataset_service.ListDataItemsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[dataset_service.ListDataItemsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[data_item.DataItem]: - async def async_generator(): - async for page in self.pages: - for response in page.data_items: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListAnnotationsPager: - """A pager for iterating through ``list_annotations`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListAnnotationsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``annotations`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListAnnotations`` requests and continue to iterate - through the ``annotations`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListAnnotationsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., dataset_service.ListAnnotationsResponse], - request: dataset_service.ListAnnotationsRequest, - response: dataset_service.ListAnnotationsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. 
- request (google.cloud.aiplatform_v1beta1.types.ListAnnotationsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListAnnotationsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = dataset_service.ListAnnotationsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[dataset_service.ListAnnotationsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[annotation.Annotation]: - for page in self.pages: - yield from page.annotations - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListAnnotationsAsyncPager: - """A pager for iterating through ``list_annotations`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListAnnotationsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``annotations`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListAnnotations`` requests and continue to iterate - through the ``annotations`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListAnnotationsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., Awaitable[dataset_service.ListAnnotationsResponse]], - request: dataset_service.ListAnnotationsRequest, - response: dataset_service.ListAnnotationsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListAnnotationsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListAnnotationsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = dataset_service.ListAnnotationsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[dataset_service.ListAnnotationsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[annotation.Annotation]: - async def async_generator(): - async for page in self.pages: - for response in page.annotations: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/__init__.py deleted file mode 100644 index 561b0c5cfd..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ 
-# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import DatasetServiceTransport -from .grpc import DatasetServiceGrpcTransport -from .grpc_asyncio import DatasetServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[DatasetServiceTransport]] -_transport_registry['grpc'] = DatasetServiceGrpcTransport -_transport_registry['grpc_asyncio'] = DatasetServiceGrpcAsyncIOTransport - -__all__ = ( - 'DatasetServiceTransport', - 'DatasetServiceGrpcTransport', - 'DatasetServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py deleted file mode 100644 index fdc2089d3d..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/base.py +++ /dev/null @@ -1,282 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import annotation_spec -from google.cloud.aiplatform_v1beta1.types import dataset -from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset -from google.cloud.aiplatform_v1beta1.types import dataset_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class DatasetServiceTransport(abc.ABC): - """Abstract transport class for DatasetService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = 
DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. 
- if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.create_dataset: gapic_v1.method.wrap_method( - self.create_dataset, - default_timeout=5.0, - client_info=client_info, - ), - self.get_dataset: gapic_v1.method.wrap_method( - self.get_dataset, - default_timeout=5.0, - client_info=client_info, - ), - self.update_dataset: gapic_v1.method.wrap_method( - self.update_dataset, - default_timeout=5.0, - client_info=client_info, - ), - self.list_datasets: gapic_v1.method.wrap_method( - self.list_datasets, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_dataset: gapic_v1.method.wrap_method( - self.delete_dataset, - default_timeout=5.0, - client_info=client_info, - ), - self.import_data: gapic_v1.method.wrap_method( - self.import_data, - default_timeout=5.0, - client_info=client_info, - ), - self.export_data: gapic_v1.method.wrap_method( - self.export_data, - default_timeout=5.0, - client_info=client_info, - ), - self.list_data_items: gapic_v1.method.wrap_method( - self.list_data_items, - default_timeout=5.0, - client_info=client_info, - ), - self.get_annotation_spec: 
gapic_v1.method.wrap_method( - self.get_annotation_spec, - default_timeout=5.0, - client_info=client_info, - ), - self.list_annotations: gapic_v1.method.wrap_method( - self.list_annotations, - default_timeout=5.0, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! - """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_dataset(self) -> Callable[ - [dataset_service.CreateDatasetRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_dataset(self) -> Callable[ - [dataset_service.GetDatasetRequest], - Union[ - dataset.Dataset, - Awaitable[dataset.Dataset] - ]]: - raise NotImplementedError() - - @property - def update_dataset(self) -> Callable[ - [dataset_service.UpdateDatasetRequest], - Union[ - gca_dataset.Dataset, - Awaitable[gca_dataset.Dataset] - ]]: - raise NotImplementedError() - - @property - def list_datasets(self) -> Callable[ - [dataset_service.ListDatasetsRequest], - Union[ - dataset_service.ListDatasetsResponse, - Awaitable[dataset_service.ListDatasetsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_dataset(self) -> Callable[ - [dataset_service.DeleteDatasetRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def import_data(self) -> Callable[ - [dataset_service.ImportDataRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def export_data(self) -> Callable[ - [dataset_service.ExportDataRequest], - Union[ - operations_pb2.Operation, - 
Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def list_data_items(self) -> Callable[ - [dataset_service.ListDataItemsRequest], - Union[ - dataset_service.ListDataItemsResponse, - Awaitable[dataset_service.ListDataItemsResponse] - ]]: - raise NotImplementedError() - - @property - def get_annotation_spec(self) -> Callable[ - [dataset_service.GetAnnotationSpecRequest], - Union[ - annotation_spec.AnnotationSpec, - Awaitable[annotation_spec.AnnotationSpec] - ]]: - raise NotImplementedError() - - @property - def list_annotations(self) -> Callable[ - [dataset_service.ListAnnotationsRequest], - Union[ - dataset_service.ListAnnotationsResponse, - Awaitable[dataset_service.ListAnnotationsResponse] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'DatasetServiceTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py deleted file mode 100644 index c392c3579c..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc.py +++ /dev/null @@ -1,511 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1beta1.types import annotation_spec -from google.cloud.aiplatform_v1beta1.types import dataset -from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset -from google.cloud.aiplatform_v1beta1.types import dataset_service -from google.longrunning import operations_pb2 # type: ignore -from .base import DatasetServiceTransport, DEFAULT_CLIENT_INFO - - -class DatasetServiceGrpcTransport(DatasetServiceTransport): - """gRPC backend transport for DatasetService. - - The service that handles the CRUD of Vertex AI Dataset and - its child resources. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. 
- - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. 
- Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_dataset(self) -> Callable[ - [dataset_service.CreateDatasetRequest], - operations_pb2.Operation]: - r"""Return a callable for the create dataset method over gRPC. - - Creates a Dataset. 
- - Returns: - Callable[[~.CreateDatasetRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_dataset' not in self._stubs: - self._stubs['create_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/CreateDataset', - request_serializer=dataset_service.CreateDatasetRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_dataset'] - - @property - def get_dataset(self) -> Callable[ - [dataset_service.GetDatasetRequest], - dataset.Dataset]: - r"""Return a callable for the get dataset method over gRPC. - - Gets a Dataset. - - Returns: - Callable[[~.GetDatasetRequest], - ~.Dataset]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_dataset' not in self._stubs: - self._stubs['get_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/GetDataset', - request_serializer=dataset_service.GetDatasetRequest.serialize, - response_deserializer=dataset.Dataset.deserialize, - ) - return self._stubs['get_dataset'] - - @property - def update_dataset(self) -> Callable[ - [dataset_service.UpdateDatasetRequest], - gca_dataset.Dataset]: - r"""Return a callable for the update dataset method over gRPC. - - Updates a Dataset. - - Returns: - Callable[[~.UpdateDatasetRequest], - ~.Dataset]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_dataset' not in self._stubs: - self._stubs['update_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/UpdateDataset', - request_serializer=dataset_service.UpdateDatasetRequest.serialize, - response_deserializer=gca_dataset.Dataset.deserialize, - ) - return self._stubs['update_dataset'] - - @property - def list_datasets(self) -> Callable[ - [dataset_service.ListDatasetsRequest], - dataset_service.ListDatasetsResponse]: - r"""Return a callable for the list datasets method over gRPC. - - Lists Datasets in a Location. - - Returns: - Callable[[~.ListDatasetsRequest], - ~.ListDatasetsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_datasets' not in self._stubs: - self._stubs['list_datasets'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/ListDatasets', - request_serializer=dataset_service.ListDatasetsRequest.serialize, - response_deserializer=dataset_service.ListDatasetsResponse.deserialize, - ) - return self._stubs['list_datasets'] - - @property - def delete_dataset(self) -> Callable[ - [dataset_service.DeleteDatasetRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete dataset method over gRPC. - - Deletes a Dataset. - - Returns: - Callable[[~.DeleteDatasetRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_dataset' not in self._stubs: - self._stubs['delete_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/DeleteDataset', - request_serializer=dataset_service.DeleteDatasetRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_dataset'] - - @property - def import_data(self) -> Callable[ - [dataset_service.ImportDataRequest], - operations_pb2.Operation]: - r"""Return a callable for the import data method over gRPC. - - Imports data into a Dataset. - - Returns: - Callable[[~.ImportDataRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'import_data' not in self._stubs: - self._stubs['import_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/ImportData', - request_serializer=dataset_service.ImportDataRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['import_data'] - - @property - def export_data(self) -> Callable[ - [dataset_service.ExportDataRequest], - operations_pb2.Operation]: - r"""Return a callable for the export data method over gRPC. - - Exports data from a Dataset. - - Returns: - Callable[[~.ExportDataRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'export_data' not in self._stubs: - self._stubs['export_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/ExportData', - request_serializer=dataset_service.ExportDataRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_data'] - - @property - def list_data_items(self) -> Callable[ - [dataset_service.ListDataItemsRequest], - dataset_service.ListDataItemsResponse]: - r"""Return a callable for the list data items method over gRPC. - - Lists DataItems in a Dataset. - - Returns: - Callable[[~.ListDataItemsRequest], - ~.ListDataItemsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_data_items' not in self._stubs: - self._stubs['list_data_items'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/ListDataItems', - request_serializer=dataset_service.ListDataItemsRequest.serialize, - response_deserializer=dataset_service.ListDataItemsResponse.deserialize, - ) - return self._stubs['list_data_items'] - - @property - def get_annotation_spec(self) -> Callable[ - [dataset_service.GetAnnotationSpecRequest], - annotation_spec.AnnotationSpec]: - r"""Return a callable for the get annotation spec method over gRPC. - - Gets an AnnotationSpec. - - Returns: - Callable[[~.GetAnnotationSpecRequest], - ~.AnnotationSpec]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_annotation_spec' not in self._stubs: - self._stubs['get_annotation_spec'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/GetAnnotationSpec', - request_serializer=dataset_service.GetAnnotationSpecRequest.serialize, - response_deserializer=annotation_spec.AnnotationSpec.deserialize, - ) - return self._stubs['get_annotation_spec'] - - @property - def list_annotations(self) -> Callable[ - [dataset_service.ListAnnotationsRequest], - dataset_service.ListAnnotationsResponse]: - r"""Return a callable for the list annotations method over gRPC. - - Lists Annotations belongs to a dataitem - - Returns: - Callable[[~.ListAnnotationsRequest], - ~.ListAnnotationsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_annotations' not in self._stubs: - self._stubs['list_annotations'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/ListAnnotations', - request_serializer=dataset_service.ListAnnotationsRequest.serialize, - response_deserializer=dataset_service.ListAnnotationsResponse.deserialize, - ) - return self._stubs['list_annotations'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'DatasetServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py deleted file mode 100644 index ce98b30f7b..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,515 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 
(the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import annotation_spec -from google.cloud.aiplatform_v1beta1.types import dataset -from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset -from google.cloud.aiplatform_v1beta1.types import dataset_service -from google.longrunning import operations_pb2 # type: ignore -from .base import DatasetServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import DatasetServiceGrpcTransport - - -class DatasetServiceGrpcAsyncIOTransport(DatasetServiceTransport): - """gRPC AsyncIO backend transport for DatasetService. - - The service that handles the CRUD of Vertex AI Dataset and - its child resources. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. 
- """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
- If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. 
This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_dataset(self) -> Callable[ - [dataset_service.CreateDatasetRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create dataset method over gRPC. - - Creates a Dataset. - - Returns: - Callable[[~.CreateDatasetRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_dataset' not in self._stubs: - self._stubs['create_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/CreateDataset', - request_serializer=dataset_service.CreateDatasetRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_dataset'] - - @property - def get_dataset(self) -> Callable[ - [dataset_service.GetDatasetRequest], - Awaitable[dataset.Dataset]]: - r"""Return a callable for the get dataset method over gRPC. - - Gets a Dataset. 
- - Returns: - Callable[[~.GetDatasetRequest], - Awaitable[~.Dataset]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_dataset' not in self._stubs: - self._stubs['get_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/GetDataset', - request_serializer=dataset_service.GetDatasetRequest.serialize, - response_deserializer=dataset.Dataset.deserialize, - ) - return self._stubs['get_dataset'] - - @property - def update_dataset(self) -> Callable[ - [dataset_service.UpdateDatasetRequest], - Awaitable[gca_dataset.Dataset]]: - r"""Return a callable for the update dataset method over gRPC. - - Updates a Dataset. - - Returns: - Callable[[~.UpdateDatasetRequest], - Awaitable[~.Dataset]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_dataset' not in self._stubs: - self._stubs['update_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/UpdateDataset', - request_serializer=dataset_service.UpdateDatasetRequest.serialize, - response_deserializer=gca_dataset.Dataset.deserialize, - ) - return self._stubs['update_dataset'] - - @property - def list_datasets(self) -> Callable[ - [dataset_service.ListDatasetsRequest], - Awaitable[dataset_service.ListDatasetsResponse]]: - r"""Return a callable for the list datasets method over gRPC. - - Lists Datasets in a Location. - - Returns: - Callable[[~.ListDatasetsRequest], - Awaitable[~.ListDatasetsResponse]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_datasets' not in self._stubs: - self._stubs['list_datasets'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/ListDatasets', - request_serializer=dataset_service.ListDatasetsRequest.serialize, - response_deserializer=dataset_service.ListDatasetsResponse.deserialize, - ) - return self._stubs['list_datasets'] - - @property - def delete_dataset(self) -> Callable[ - [dataset_service.DeleteDatasetRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete dataset method over gRPC. - - Deletes a Dataset. - - Returns: - Callable[[~.DeleteDatasetRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_dataset' not in self._stubs: - self._stubs['delete_dataset'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/DeleteDataset', - request_serializer=dataset_service.DeleteDatasetRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_dataset'] - - @property - def import_data(self) -> Callable[ - [dataset_service.ImportDataRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the import data method over gRPC. - - Imports data into a Dataset. - - Returns: - Callable[[~.ImportDataRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'import_data' not in self._stubs: - self._stubs['import_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/ImportData', - request_serializer=dataset_service.ImportDataRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['import_data'] - - @property - def export_data(self) -> Callable[ - [dataset_service.ExportDataRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the export data method over gRPC. - - Exports data from a Dataset. - - Returns: - Callable[[~.ExportDataRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'export_data' not in self._stubs: - self._stubs['export_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/ExportData', - request_serializer=dataset_service.ExportDataRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_data'] - - @property - def list_data_items(self) -> Callable[ - [dataset_service.ListDataItemsRequest], - Awaitable[dataset_service.ListDataItemsResponse]]: - r"""Return a callable for the list data items method over gRPC. - - Lists DataItems in a Dataset. - - Returns: - Callable[[~.ListDataItemsRequest], - Awaitable[~.ListDataItemsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_data_items' not in self._stubs: - self._stubs['list_data_items'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/ListDataItems', - request_serializer=dataset_service.ListDataItemsRequest.serialize, - response_deserializer=dataset_service.ListDataItemsResponse.deserialize, - ) - return self._stubs['list_data_items'] - - @property - def get_annotation_spec(self) -> Callable[ - [dataset_service.GetAnnotationSpecRequest], - Awaitable[annotation_spec.AnnotationSpec]]: - r"""Return a callable for the get annotation spec method over gRPC. - - Gets an AnnotationSpec. - - Returns: - Callable[[~.GetAnnotationSpecRequest], - Awaitable[~.AnnotationSpec]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_annotation_spec' not in self._stubs: - self._stubs['get_annotation_spec'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/GetAnnotationSpec', - request_serializer=dataset_service.GetAnnotationSpecRequest.serialize, - response_deserializer=annotation_spec.AnnotationSpec.deserialize, - ) - return self._stubs['get_annotation_spec'] - - @property - def list_annotations(self) -> Callable[ - [dataset_service.ListAnnotationsRequest], - Awaitable[dataset_service.ListAnnotationsResponse]]: - r"""Return a callable for the list annotations method over gRPC. - - Lists Annotations belongs to a dataitem - - Returns: - Callable[[~.ListAnnotationsRequest], - Awaitable[~.ListAnnotationsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_annotations' not in self._stubs: - self._stubs['list_annotations'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.DatasetService/ListAnnotations', - request_serializer=dataset_service.ListAnnotationsRequest.serialize, - response_deserializer=dataset_service.ListAnnotationsResponse.deserialize, - ) - return self._stubs['list_annotations'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'DatasetServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/__init__.py deleted file mode 100644 index 7db43e768e..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import EndpointServiceClient -from .async_client import EndpointServiceAsyncClient - -__all__ = ( - 'EndpointServiceClient', - 'EndpointServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py deleted file mode 100644 index 16a93ca6d4..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/async_client.py +++ /dev/null @@ -1,889 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.endpoint_service import pagers -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import endpoint -from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint -from google.cloud.aiplatform_v1beta1.types import endpoint_service -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import EndpointServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import EndpointServiceGrpcAsyncIOTransport -from .client import EndpointServiceClient - - -class EndpointServiceAsyncClient: - """A service for managing Vertex AI's Endpoints.""" - - _client: EndpointServiceClient - - DEFAULT_ENDPOINT = EndpointServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = EndpointServiceClient.DEFAULT_MTLS_ENDPOINT - - endpoint_path = staticmethod(EndpointServiceClient.endpoint_path) - parse_endpoint_path = staticmethod(EndpointServiceClient.parse_endpoint_path) - 
model_path = staticmethod(EndpointServiceClient.model_path) - parse_model_path = staticmethod(EndpointServiceClient.parse_model_path) - model_deployment_monitoring_job_path = staticmethod(EndpointServiceClient.model_deployment_monitoring_job_path) - parse_model_deployment_monitoring_job_path = staticmethod(EndpointServiceClient.parse_model_deployment_monitoring_job_path) - network_path = staticmethod(EndpointServiceClient.network_path) - parse_network_path = staticmethod(EndpointServiceClient.parse_network_path) - common_billing_account_path = staticmethod(EndpointServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(EndpointServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(EndpointServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(EndpointServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(EndpointServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(EndpointServiceClient.parse_common_organization_path) - common_project_path = staticmethod(EndpointServiceClient.common_project_path) - parse_common_project_path = staticmethod(EndpointServiceClient.parse_common_project_path) - common_location_path = staticmethod(EndpointServiceClient.common_location_path) - parse_common_location_path = staticmethod(EndpointServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - EndpointServiceAsyncClient: The constructed client. 
- """ - return EndpointServiceClient.from_service_account_info.__func__(EndpointServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - EndpointServiceAsyncClient: The constructed client. - """ - return EndpointServiceClient.from_service_account_file.__func__(EndpointServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> EndpointServiceTransport: - """Returns the transport used by the client instance. - - Returns: - EndpointServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(EndpointServiceClient).get_transport_class, type(EndpointServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, EndpointServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the endpoint service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.EndpointServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. 
It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = EndpointServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_endpoint(self, - request: Union[endpoint_service.CreateEndpointRequest, dict] = None, - *, - parent: str = None, - endpoint: gca_endpoint.Endpoint = None, - endpoint_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates an Endpoint. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateEndpointRequest, dict]): - The request object. Request message for - [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint]. - parent (:class:`str`): - Required. The resource name of the Location to create - the Endpoint in. 
Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - endpoint (:class:`google.cloud.aiplatform_v1beta1.types.Endpoint`): - Required. The Endpoint to create. - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - endpoint_id (:class:`str`): - Immutable. The ID to use for endpoint, which will become - the final component of the endpoint resource name. If - not provided, Vertex AI will generate a value for this - ID. - - This value should be 1-10 characters, and valid - characters are /[0-9]/. When using HTTP/JSON, this field - is populated based on a query string argument, such as - ``?endpoint_id=12345``. This is the fallback for fields - that are not included in either the URI or the body. - - This corresponds to the ``endpoint_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Endpoint` Models are deployed into it, and afterwards Endpoint is called to obtain - predictions and explanations. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, endpoint, endpoint_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = endpoint_service.CreateEndpointRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if endpoint is not None: - request.endpoint = endpoint - if endpoint_id is not None: - request.endpoint_id = endpoint_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_endpoint, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_endpoint.Endpoint, - metadata_type=endpoint_service.CreateEndpointOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_endpoint(self, - request: Union[endpoint_service.GetEndpointRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> endpoint.Endpoint: - r"""Gets an Endpoint. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetEndpointRequest, dict]): - The request object. Request message for - [EndpointService.GetEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.GetEndpoint] - name (:class:`str`): - Required. 
The name of the Endpoint resource. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Endpoint: - Models are deployed into it, and - afterwards Endpoint is called to obtain - predictions and explanations. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = endpoint_service.GetEndpointRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_endpoint, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def list_endpoints(self, - request: Union[endpoint_service.ListEndpointsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListEndpointsAsyncPager: - r"""Lists Endpoints in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListEndpointsRequest, dict]): - The request object. Request message for - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. - parent (:class:`str`): - Required. The resource name of the Location from which - to list the Endpoints. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.endpoint_service.pagers.ListEndpointsAsyncPager: - Response message for - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = endpoint_service.ListEndpointsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_endpoints, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListEndpointsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_endpoint(self, - request: Union[endpoint_service.UpdateEndpointRequest, dict] = None, - *, - endpoint: gca_endpoint.Endpoint = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_endpoint.Endpoint: - r"""Updates an Endpoint. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateEndpointRequest, dict]): - The request object. Request message for - [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. - endpoint (:class:`google.cloud.aiplatform_v1beta1.types.Endpoint`): - Required. The Endpoint which replaces - the resource on the server. - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The update mask applies to the resource. See - [google.protobuf.FieldMask][google.protobuf.FieldMask]. 
- - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Endpoint: - Models are deployed into it, and - afterwards Endpoint is called to obtain - predictions and explanations. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = endpoint_service.UpdateEndpointRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_endpoint, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint.name", request.endpoint.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def delete_endpoint(self, - request: Union[endpoint_service.DeleteEndpointRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes an Endpoint. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteEndpointRequest, dict]): - The request object. Request message for - [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeleteEndpoint]. - name (:class:`str`): - Required. The name of the Endpoint resource to be - deleted. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = endpoint_service.DeleteEndpointRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_endpoint, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def deploy_model(self, - request: Union[endpoint_service.DeployModelRequest, dict] = None, - *, - endpoint: str = None, - deployed_model: gca_endpoint.DeployedModel = None, - traffic_split: Sequence[endpoint_service.DeployModelRequest.TrafficSplitEntry] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deploys a Model into this Endpoint, creating a - DeployedModel within it. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeployModelRequest, dict]): - The request object. Request message for - [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. 
- endpoint (:class:`str`): - Required. The name of the Endpoint resource into which - to deploy a Model. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_model (:class:`google.cloud.aiplatform_v1beta1.types.DeployedModel`): - Required. The DeployedModel to be created within the - Endpoint. Note that - [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] - must be updated for the DeployedModel to start receiving - traffic, either as part of this call, or via - [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. - - This corresponds to the ``deployed_model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - traffic_split (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.DeployModelRequest.TrafficSplitEntry]`): - A map from a DeployedModel's ID to the percentage of - this Endpoint's traffic that should be forwarded to that - DeployedModel. - - If this field is non-empty, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] - will be overwritten with it. To refer to the ID of the - just being deployed Model, a "0" should be used, and the - actual ID of the new DeployedModel will be filled in its - place by this method. The traffic percentage values must - add up to 100. - - If this field is empty, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] - is not updated. - - This corresponds to the ``traffic_split`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.DeployModelResponse` - Response message for - [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, deployed_model, traffic_split]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = endpoint_service.DeployModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if deployed_model is not None: - request.deployed_model = deployed_model - - if traffic_split: - request.traffic_split.update(traffic_split) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.deploy_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
- response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - endpoint_service.DeployModelResponse, - metadata_type=endpoint_service.DeployModelOperationMetadata, - ) - - # Done; return the response. - return response - - async def undeploy_model(self, - request: Union[endpoint_service.UndeployModelRequest, dict] = None, - *, - endpoint: str = None, - deployed_model_id: str = None, - traffic_split: Sequence[endpoint_service.UndeployModelRequest.TrafficSplitEntry] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Undeploys a Model from an Endpoint, removing a - DeployedModel from it, and freeing all resources it's - using. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UndeployModelRequest, dict]): - The request object. Request message for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. - endpoint (:class:`str`): - Required. The name of the Endpoint resource from which - to undeploy a Model. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_model_id (:class:`str`): - Required. The ID of the DeployedModel - to be undeployed from the Endpoint. - - This corresponds to the ``deployed_model_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - traffic_split (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.UndeployModelRequest.TrafficSplitEntry]`): - If this field is provided, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] - will be overwritten with it. 
If last DeployedModel is - being undeployed from the Endpoint, the - [Endpoint.traffic_split] will always end up empty when - this call returns. A DeployedModel will be successfully - undeployed only if it doesn't have any traffic assigned - to it when this method executes, or if this field - unassigns any traffic to it. - - This corresponds to the ``traffic_split`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.UndeployModelResponse` - Response message for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, deployed_model_id, traffic_split]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = endpoint_service.UndeployModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if deployed_model_id is not None: - request.deployed_model_id = deployed_model_id - - if traffic_split: - request.traffic_split.update(traffic_split) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.undeploy_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - endpoint_service.UndeployModelResponse, - metadata_type=endpoint_service.UndeployModelOperationMetadata, - ) - - # Done; return the response. - return response - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "EndpointServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py deleted file mode 100644 index 3c092de358..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/client.py +++ /dev/null @@ -1,1112 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.endpoint_service import pagers -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import endpoint -from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint -from google.cloud.aiplatform_v1beta1.types import endpoint_service -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import 
timestamp_pb2 # type: ignore -from .transports.base import EndpointServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import EndpointServiceGrpcTransport -from .transports.grpc_asyncio import EndpointServiceGrpcAsyncIOTransport - - -class EndpointServiceClientMeta(type): - """Metaclass for the EndpointService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[EndpointServiceTransport]] - _transport_registry["grpc"] = EndpointServiceGrpcTransport - _transport_registry["grpc_asyncio"] = EndpointServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[EndpointServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class EndpointServiceClient(metaclass=EndpointServiceClientMeta): - """A service for managing Vertex AI's Endpoints.""" - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
- ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - EndpointServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - EndpointServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> EndpointServiceTransport: - """Returns the transport used by the client instance. - - Returns: - EndpointServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def endpoint_path(project: str,location: str,endpoint: str,) -> str: - """Returns a fully-qualified endpoint string.""" - return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - - @staticmethod - def parse_endpoint_path(path: str) -> Dict[str,str]: - """Parses a endpoint path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_path(project: str,location: str,model: str,) -> str: - """Returns a fully-qualified model string.""" - return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - - @staticmethod - def parse_model_path(path: str) -> Dict[str,str]: - """Parses a model path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_deployment_monitoring_job_path(project: str,location: str,model_deployment_monitoring_job: str,) -> str: - """Returns a fully-qualified model_deployment_monitoring_job string.""" - return "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format(project=project, location=location, model_deployment_monitoring_job=model_deployment_monitoring_job, ) - - @staticmethod - def parse_model_deployment_monitoring_job_path(path: str) -> Dict[str,str]: - """Parses a model_deployment_monitoring_job path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/modelDeploymentMonitoringJobs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def network_path(project: str,network: str,) -> str: - """Returns a fully-qualified network string.""" - return 
"projects/{project}/global/networks/{network}".format(project=project, network=network, ) - - @staticmethod - def parse_network_path(path: str) -> Dict[str,str]: - """Parses a network path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/global/networks/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m 
else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, EndpointServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the endpoint service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, EndpointServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. 
- (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. 
- if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, EndpointServiceTransport): - # transport is a EndpointServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def create_endpoint(self, - request: Union[endpoint_service.CreateEndpointRequest, dict] = None, - *, - parent: str = None, - endpoint: gca_endpoint.Endpoint = None, - endpoint_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates an Endpoint. 
- - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateEndpointRequest, dict]): - The request object. Request message for - [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint]. - parent (str): - Required. The resource name of the Location to create - the Endpoint in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - endpoint (google.cloud.aiplatform_v1beta1.types.Endpoint): - Required. The Endpoint to create. - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - endpoint_id (str): - Immutable. The ID to use for endpoint, which will become - the final component of the endpoint resource name. If - not provided, Vertex AI will generate a value for this - ID. - - This value should be 1-10 characters, and valid - characters are /[0-9]/. When using HTTP/JSON, this field - is populated based on a query string argument, such as - ``?endpoint_id=12345``. This is the fallback for fields - that are not included in either the URI or the body. - - This corresponds to the ``endpoint_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Endpoint` Models are deployed into it, and afterwards Endpoint is called to obtain - predictions and explanations. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, endpoint, endpoint_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a endpoint_service.CreateEndpointRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, endpoint_service.CreateEndpointRequest): - request = endpoint_service.CreateEndpointRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if endpoint is not None: - request.endpoint = endpoint - if endpoint_id is not None: - request.endpoint_id = endpoint_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_endpoint] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_endpoint.Endpoint, - metadata_type=endpoint_service.CreateEndpointOperationMetadata, - ) - - # Done; return the response. 
- return response - - def get_endpoint(self, - request: Union[endpoint_service.GetEndpointRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> endpoint.Endpoint: - r"""Gets an Endpoint. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetEndpointRequest, dict]): - The request object. Request message for - [EndpointService.GetEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.GetEndpoint] - name (str): - Required. The name of the Endpoint resource. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Endpoint: - Models are deployed into it, and - afterwards Endpoint is called to obtain - predictions and explanations. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a endpoint_service.GetEndpointRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, endpoint_service.GetEndpointRequest): - request = endpoint_service.GetEndpointRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_endpoint] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_endpoints(self, - request: Union[endpoint_service.ListEndpointsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListEndpointsPager: - r"""Lists Endpoints in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListEndpointsRequest, dict]): - The request object. Request message for - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. - parent (str): - Required. The resource name of the Location from which - to list the Endpoints. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.services.endpoint_service.pagers.ListEndpointsPager: - Response message for - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a endpoint_service.ListEndpointsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, endpoint_service.ListEndpointsRequest): - request = endpoint_service.ListEndpointsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_endpoints] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListEndpointsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def update_endpoint(self, - request: Union[endpoint_service.UpdateEndpointRequest, dict] = None, - *, - endpoint: gca_endpoint.Endpoint = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_endpoint.Endpoint: - r"""Updates an Endpoint. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateEndpointRequest, dict]): - The request object. Request message for - [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. - endpoint (google.cloud.aiplatform_v1beta1.types.Endpoint): - Required. The Endpoint which replaces - the resource on the server. - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. See - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Endpoint: - Models are deployed into it, and - afterwards Endpoint is called to obtain - predictions and explanations. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([endpoint, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a endpoint_service.UpdateEndpointRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, endpoint_service.UpdateEndpointRequest): - request = endpoint_service.UpdateEndpointRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_endpoint] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint.name", request.endpoint.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_endpoint(self, - request: Union[endpoint_service.DeleteEndpointRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes an Endpoint. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteEndpointRequest, dict]): - The request object. Request message for - [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeleteEndpoint]. - name (str): - Required. The name of the Endpoint resource to be - deleted. 
Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a endpoint_service.DeleteEndpointRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, endpoint_service.DeleteEndpointRequest): - request = endpoint_service.DeleteEndpointRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.delete_endpoint] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def deploy_model(self, - request: Union[endpoint_service.DeployModelRequest, dict] = None, - *, - endpoint: str = None, - deployed_model: gca_endpoint.DeployedModel = None, - traffic_split: Sequence[endpoint_service.DeployModelRequest.TrafficSplitEntry] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deploys a Model into this Endpoint, creating a - DeployedModel within it. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeployModelRequest, dict]): - The request object. Request message for - [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. - endpoint (str): - Required. The name of the Endpoint resource into which - to deploy a Model. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_model (google.cloud.aiplatform_v1beta1.types.DeployedModel): - Required. The DeployedModel to be created within the - Endpoint. 
Note that - [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] - must be updated for the DeployedModel to start receiving - traffic, either as part of this call, or via - [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. - - This corresponds to the ``deployed_model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - traffic_split (Sequence[google.cloud.aiplatform_v1beta1.types.DeployModelRequest.TrafficSplitEntry]): - A map from a DeployedModel's ID to the percentage of - this Endpoint's traffic that should be forwarded to that - DeployedModel. - - If this field is non-empty, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] - will be overwritten with it. To refer to the ID of the - just being deployed Model, a "0" should be used, and the - actual ID of the new DeployedModel will be filled in its - place by this method. The traffic percentage values must - add up to 100. - - If this field is empty, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] - is not updated. - - This corresponds to the ``traffic_split`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.DeployModelResponse` - Response message for - [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, deployed_model, traffic_split]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a endpoint_service.DeployModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, endpoint_service.DeployModelRequest): - request = endpoint_service.DeployModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if deployed_model is not None: - request.deployed_model = deployed_model - if traffic_split is not None: - request.traffic_split = traffic_split - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.deploy_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - endpoint_service.DeployModelResponse, - metadata_type=endpoint_service.DeployModelOperationMetadata, - ) - - # Done; return the response. 
- return response - - def undeploy_model(self, - request: Union[endpoint_service.UndeployModelRequest, dict] = None, - *, - endpoint: str = None, - deployed_model_id: str = None, - traffic_split: Sequence[endpoint_service.UndeployModelRequest.TrafficSplitEntry] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Undeploys a Model from an Endpoint, removing a - DeployedModel from it, and freeing all resources it's - using. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UndeployModelRequest, dict]): - The request object. Request message for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. - endpoint (str): - Required. The name of the Endpoint resource from which - to undeploy a Model. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_model_id (str): - Required. The ID of the DeployedModel - to be undeployed from the Endpoint. - - This corresponds to the ``deployed_model_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - traffic_split (Sequence[google.cloud.aiplatform_v1beta1.types.UndeployModelRequest.TrafficSplitEntry]): - If this field is provided, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] - will be overwritten with it. If last DeployedModel is - being undeployed from the Endpoint, the - [Endpoint.traffic_split] will always end up empty when - this call returns. A DeployedModel will be successfully - undeployed only if it doesn't have any traffic assigned - to it when this method executes, or if this field - unassigns any traffic to it. 
- - This corresponds to the ``traffic_split`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.UndeployModelResponse` - Response message for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, deployed_model_id, traffic_split]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a endpoint_service.UndeployModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, endpoint_service.UndeployModelRequest): - request = endpoint_service.UndeployModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if deployed_model_id is not None: - request.deployed_model_id = deployed_model_id - if traffic_split is not None: - request.traffic_split = traffic_split - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.undeploy_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - endpoint_service.UndeployModelResponse, - metadata_type=endpoint_service.UndeployModelOperationMetadata, - ) - - # Done; return the response. - return response - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! - """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "EndpointServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/pagers.py deleted file mode 100644 index 9b56685e50..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/pagers.py +++ /dev/null @@ -1,141 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.aiplatform_v1beta1.types import endpoint -from google.cloud.aiplatform_v1beta1.types import endpoint_service - - -class ListEndpointsPager: - """A pager for iterating through ``list_endpoints`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListEndpointsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``endpoints`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListEndpoints`` requests and continue to iterate - through the ``endpoints`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListEndpointsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., endpoint_service.ListEndpointsResponse], - request: endpoint_service.ListEndpointsRequest, - response: endpoint_service.ListEndpointsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListEndpointsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListEndpointsResponse): - The initial response object. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = endpoint_service.ListEndpointsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[endpoint_service.ListEndpointsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[endpoint.Endpoint]: - for page in self.pages: - yield from page.endpoints - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListEndpointsAsyncPager: - """A pager for iterating through ``list_endpoints`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListEndpointsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``endpoints`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListEndpoints`` requests and continue to iterate - through the ``endpoints`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListEndpointsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[endpoint_service.ListEndpointsResponse]], - request: endpoint_service.ListEndpointsRequest, - response: endpoint_service.ListEndpointsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. 
- request (google.cloud.aiplatform_v1beta1.types.ListEndpointsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListEndpointsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = endpoint_service.ListEndpointsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[endpoint_service.ListEndpointsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[endpoint.Endpoint]: - async def async_generator(): - async for page in self.pages: - for response in page.endpoints: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/__init__.py deleted file mode 100644 index a062fc074c..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import EndpointServiceTransport -from .grpc import EndpointServiceGrpcTransport -from .grpc_asyncio import EndpointServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[EndpointServiceTransport]] -_transport_registry['grpc'] = EndpointServiceGrpcTransport -_transport_registry['grpc_asyncio'] = EndpointServiceGrpcAsyncIOTransport - -__all__ = ( - 'EndpointServiceTransport', - 'EndpointServiceGrpcTransport', - 'EndpointServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py deleted file mode 100644 index aa217833a9..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/base.py +++ /dev/null @@ -1,239 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import endpoint -from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint -from google.cloud.aiplatform_v1beta1.types import endpoint_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class EndpointServiceTransport(abc.ABC): - """Abstract transport class for EndpointService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. 
- if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.create_endpoint: gapic_v1.method.wrap_method( - self.create_endpoint, - default_timeout=5.0, - client_info=client_info, - ), - self.get_endpoint: gapic_v1.method.wrap_method( - self.get_endpoint, - default_timeout=5.0, - client_info=client_info, - ), - self.list_endpoints: gapic_v1.method.wrap_method( - self.list_endpoints, - default_timeout=5.0, - client_info=client_info, - ), - self.update_endpoint: gapic_v1.method.wrap_method( - self.update_endpoint, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_endpoint: gapic_v1.method.wrap_method( - self.delete_endpoint, - default_timeout=5.0, - client_info=client_info, - ), - self.deploy_model: gapic_v1.method.wrap_method( - self.deploy_model, - default_timeout=5.0, - client_info=client_info, - ), - self.undeploy_model: gapic_v1.method.wrap_method( - self.undeploy_model, - default_timeout=5.0, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! 
- """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_endpoint(self) -> Callable[ - [endpoint_service.CreateEndpointRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_endpoint(self) -> Callable[ - [endpoint_service.GetEndpointRequest], - Union[ - endpoint.Endpoint, - Awaitable[endpoint.Endpoint] - ]]: - raise NotImplementedError() - - @property - def list_endpoints(self) -> Callable[ - [endpoint_service.ListEndpointsRequest], - Union[ - endpoint_service.ListEndpointsResponse, - Awaitable[endpoint_service.ListEndpointsResponse] - ]]: - raise NotImplementedError() - - @property - def update_endpoint(self) -> Callable[ - [endpoint_service.UpdateEndpointRequest], - Union[ - gca_endpoint.Endpoint, - Awaitable[gca_endpoint.Endpoint] - ]]: - raise NotImplementedError() - - @property - def delete_endpoint(self) -> Callable[ - [endpoint_service.DeleteEndpointRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def deploy_model(self) -> Callable[ - [endpoint_service.DeployModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def undeploy_model(self) -> Callable[ - [endpoint_service.UndeployModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'EndpointServiceTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py deleted file mode 100644 index 9097917238..0000000000 --- 
a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc.py +++ /dev/null @@ -1,434 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1beta1.types import endpoint -from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint -from google.cloud.aiplatform_v1beta1.types import endpoint_service -from google.longrunning import operations_pb2 # type: ignore -from .base import EndpointServiceTransport, DEFAULT_CLIENT_INFO - - -class EndpointServiceGrpcTransport(EndpointServiceTransport): - """gRPC backend transport for EndpointService. - - A service for managing Vertex AI's Endpoints. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. 
- ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_endpoint(self) -> Callable[ - [endpoint_service.CreateEndpointRequest], - operations_pb2.Operation]: - r"""Return a callable for the create endpoint method over gRPC. - - Creates an Endpoint. 
- - Returns: - Callable[[~.CreateEndpointRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_endpoint' not in self._stubs: - self._stubs['create_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/CreateEndpoint', - request_serializer=endpoint_service.CreateEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_endpoint'] - - @property - def get_endpoint(self) -> Callable[ - [endpoint_service.GetEndpointRequest], - endpoint.Endpoint]: - r"""Return a callable for the get endpoint method over gRPC. - - Gets an Endpoint. - - Returns: - Callable[[~.GetEndpointRequest], - ~.Endpoint]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_endpoint' not in self._stubs: - self._stubs['get_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/GetEndpoint', - request_serializer=endpoint_service.GetEndpointRequest.serialize, - response_deserializer=endpoint.Endpoint.deserialize, - ) - return self._stubs['get_endpoint'] - - @property - def list_endpoints(self) -> Callable[ - [endpoint_service.ListEndpointsRequest], - endpoint_service.ListEndpointsResponse]: - r"""Return a callable for the list endpoints method over gRPC. - - Lists Endpoints in a Location. - - Returns: - Callable[[~.ListEndpointsRequest], - ~.ListEndpointsResponse]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_endpoints' not in self._stubs: - self._stubs['list_endpoints'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/ListEndpoints', - request_serializer=endpoint_service.ListEndpointsRequest.serialize, - response_deserializer=endpoint_service.ListEndpointsResponse.deserialize, - ) - return self._stubs['list_endpoints'] - - @property - def update_endpoint(self) -> Callable[ - [endpoint_service.UpdateEndpointRequest], - gca_endpoint.Endpoint]: - r"""Return a callable for the update endpoint method over gRPC. - - Updates an Endpoint. - - Returns: - Callable[[~.UpdateEndpointRequest], - ~.Endpoint]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_endpoint' not in self._stubs: - self._stubs['update_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/UpdateEndpoint', - request_serializer=endpoint_service.UpdateEndpointRequest.serialize, - response_deserializer=gca_endpoint.Endpoint.deserialize, - ) - return self._stubs['update_endpoint'] - - @property - def delete_endpoint(self) -> Callable[ - [endpoint_service.DeleteEndpointRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete endpoint method over gRPC. - - Deletes an Endpoint. - - Returns: - Callable[[~.DeleteEndpointRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_endpoint' not in self._stubs: - self._stubs['delete_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/DeleteEndpoint', - request_serializer=endpoint_service.DeleteEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_endpoint'] - - @property - def deploy_model(self) -> Callable[ - [endpoint_service.DeployModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the deploy model method over gRPC. - - Deploys a Model into this Endpoint, creating a - DeployedModel within it. - - Returns: - Callable[[~.DeployModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'deploy_model' not in self._stubs: - self._stubs['deploy_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/DeployModel', - request_serializer=endpoint_service.DeployModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['deploy_model'] - - @property - def undeploy_model(self) -> Callable[ - [endpoint_service.UndeployModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the undeploy model method over gRPC. - - Undeploys a Model from an Endpoint, removing a - DeployedModel from it, and freeing all resources it's - using. - - Returns: - Callable[[~.UndeployModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'undeploy_model' not in self._stubs: - self._stubs['undeploy_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/UndeployModel', - request_serializer=endpoint_service.UndeployModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['undeploy_model'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'EndpointServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py deleted file mode 100644 index 0c55cc82dc..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,438 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import endpoint -from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint -from google.cloud.aiplatform_v1beta1.types import endpoint_service -from google.longrunning import operations_pb2 # type: ignore -from .base import EndpointServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import EndpointServiceGrpcTransport - - -class EndpointServiceGrpcAsyncIOTransport(EndpointServiceTransport): - """gRPC AsyncIO backend transport for EndpointService. - - A service for managing Vertex AI's Endpoints. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. 
If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. 
- This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. 
- - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. 
- if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_endpoint(self) -> Callable[ - [endpoint_service.CreateEndpointRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create endpoint method over gRPC. - - Creates an Endpoint. - - Returns: - Callable[[~.CreateEndpointRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_endpoint' not in self._stubs: - self._stubs['create_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/CreateEndpoint', - request_serializer=endpoint_service.CreateEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_endpoint'] - - @property - def get_endpoint(self) -> Callable[ - [endpoint_service.GetEndpointRequest], - Awaitable[endpoint.Endpoint]]: - r"""Return a callable for the get endpoint method over gRPC. - - Gets an Endpoint. - - Returns: - Callable[[~.GetEndpointRequest], - Awaitable[~.Endpoint]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_endpoint' not in self._stubs: - self._stubs['get_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/GetEndpoint', - request_serializer=endpoint_service.GetEndpointRequest.serialize, - response_deserializer=endpoint.Endpoint.deserialize, - ) - return self._stubs['get_endpoint'] - - @property - def list_endpoints(self) -> Callable[ - [endpoint_service.ListEndpointsRequest], - Awaitable[endpoint_service.ListEndpointsResponse]]: - r"""Return a callable for the list endpoints method over gRPC. - - Lists Endpoints in a Location. - - Returns: - Callable[[~.ListEndpointsRequest], - Awaitable[~.ListEndpointsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_endpoints' not in self._stubs: - self._stubs['list_endpoints'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/ListEndpoints', - request_serializer=endpoint_service.ListEndpointsRequest.serialize, - response_deserializer=endpoint_service.ListEndpointsResponse.deserialize, - ) - return self._stubs['list_endpoints'] - - @property - def update_endpoint(self) -> Callable[ - [endpoint_service.UpdateEndpointRequest], - Awaitable[gca_endpoint.Endpoint]]: - r"""Return a callable for the update endpoint method over gRPC. - - Updates an Endpoint. - - Returns: - Callable[[~.UpdateEndpointRequest], - Awaitable[~.Endpoint]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_endpoint' not in self._stubs: - self._stubs['update_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/UpdateEndpoint', - request_serializer=endpoint_service.UpdateEndpointRequest.serialize, - response_deserializer=gca_endpoint.Endpoint.deserialize, - ) - return self._stubs['update_endpoint'] - - @property - def delete_endpoint(self) -> Callable[ - [endpoint_service.DeleteEndpointRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete endpoint method over gRPC. - - Deletes an Endpoint. - - Returns: - Callable[[~.DeleteEndpointRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_endpoint' not in self._stubs: - self._stubs['delete_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/DeleteEndpoint', - request_serializer=endpoint_service.DeleteEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_endpoint'] - - @property - def deploy_model(self) -> Callable[ - [endpoint_service.DeployModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the deploy model method over gRPC. - - Deploys a Model into this Endpoint, creating a - DeployedModel within it. - - Returns: - Callable[[~.DeployModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'deploy_model' not in self._stubs: - self._stubs['deploy_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/DeployModel', - request_serializer=endpoint_service.DeployModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['deploy_model'] - - @property - def undeploy_model(self) -> Callable[ - [endpoint_service.UndeployModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the undeploy model method over gRPC. - - Undeploys a Model from an Endpoint, removing a - DeployedModel from it, and freeing all resources it's - using. - - Returns: - Callable[[~.UndeployModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'undeploy_model' not in self._stubs: - self._stubs['undeploy_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.EndpointService/UndeployModel', - request_serializer=endpoint_service.UndeployModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['undeploy_model'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'EndpointServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/__init__.py deleted file mode 100644 index e009ebaec2..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import FeaturestoreOnlineServingServiceClient -from .async_client import FeaturestoreOnlineServingServiceAsyncClient - -__all__ = ( - 'FeaturestoreOnlineServingServiceClient', - 'FeaturestoreOnlineServingServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py deleted file mode 100644 index c25ec8008e..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/async_client.py +++ /dev/null @@ -1,332 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, AsyncIterable, Awaitable, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.cloud.aiplatform_v1beta1.types import featurestore_online_service -from .transports.base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import FeaturestoreOnlineServingServiceGrpcAsyncIOTransport -from .client import FeaturestoreOnlineServingServiceClient - - -class FeaturestoreOnlineServingServiceAsyncClient: - """A service for serving online feature values.""" - - _client: FeaturestoreOnlineServingServiceClient - - DEFAULT_ENDPOINT = FeaturestoreOnlineServingServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = FeaturestoreOnlineServingServiceClient.DEFAULT_MTLS_ENDPOINT - - entity_type_path = staticmethod(FeaturestoreOnlineServingServiceClient.entity_type_path) - parse_entity_type_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_entity_type_path) - common_billing_account_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_folder_path) - 
common_organization_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_organization_path) - common_project_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_project_path) - parse_common_project_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_project_path) - common_location_path = staticmethod(FeaturestoreOnlineServingServiceClient.common_location_path) - parse_common_location_path = staticmethod(FeaturestoreOnlineServingServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - FeaturestoreOnlineServingServiceAsyncClient: The constructed client. - """ - return FeaturestoreOnlineServingServiceClient.from_service_account_info.__func__(FeaturestoreOnlineServingServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - FeaturestoreOnlineServingServiceAsyncClient: The constructed client. 
- """ - return FeaturestoreOnlineServingServiceClient.from_service_account_file.__func__(FeaturestoreOnlineServingServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> FeaturestoreOnlineServingServiceTransport: - """Returns the transport used by the client instance. - - Returns: - FeaturestoreOnlineServingServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(FeaturestoreOnlineServingServiceClient).get_transport_class, type(FeaturestoreOnlineServingServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, FeaturestoreOnlineServingServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the featurestore online serving service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.FeaturestoreOnlineServingServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = FeaturestoreOnlineServingServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def read_feature_values(self, - request: Union[featurestore_online_service.ReadFeatureValuesRequest, dict] = None, - *, - entity_type: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> featurestore_online_service.ReadFeatureValuesResponse: - r"""Reads Feature values of a specific entity of an - EntityType. For reading feature values of multiple - entities of an EntityType, please use - StreamingReadFeatureValues. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesRequest, dict]): - The request object. Request message for - [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. - entity_type (:class:`str`): - Required. The resource name of the EntityType for the - entity being read. 
Value format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. - For example, for a machine learning model predicting - user clicks on a website, an EntityType ID could be - ``user``. - - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse: - Response message for - [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_online_service.ReadFeatureValuesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if entity_type is not None: - request.entity_type = entity_type - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.read_feature_values, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("entity_type", request.entity_type), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def streaming_read_feature_values(self, - request: Union[featurestore_online_service.StreamingReadFeatureValuesRequest, dict] = None, - *, - entity_type: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> Awaitable[AsyncIterable[featurestore_online_service.ReadFeatureValuesResponse]]: - r"""Reads Feature values for multiple entities. Depending - on their size, data for different entities may be broken - up across multiple responses. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.StreamingReadFeatureValuesRequest, dict]): - The request object. Request message for - [FeaturestoreOnlineServingService.StreamingFeatureValuesRead][]. - entity_type (:class:`str`): - Required. The resource name of the entities' type. Value - format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. - For example, for a machine learning model predicting - user clicks on a website, an EntityType ID could be - ``user``. - - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - AsyncIterable[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse]: - Response message for - [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_online_service.StreamingReadFeatureValuesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if entity_type is not None: - request.entity_type = entity_type - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.streaming_read_feature_values, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("entity_type", request.entity_type), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "FeaturestoreOnlineServingServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py deleted file mode 100644 index 123efd657e..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/client.py +++ /dev/null @@ -1,530 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Iterable, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.cloud.aiplatform_v1beta1.types import featurestore_online_service -from .transports.base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import FeaturestoreOnlineServingServiceGrpcTransport -from .transports.grpc_asyncio import FeaturestoreOnlineServingServiceGrpcAsyncIOTransport - - -class FeaturestoreOnlineServingServiceClientMeta(type): - """Metaclass for the FeaturestoreOnlineServingService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[FeaturestoreOnlineServingServiceTransport]] - _transport_registry["grpc"] = FeaturestoreOnlineServingServiceGrpcTransport - _transport_registry["grpc_asyncio"] = FeaturestoreOnlineServingServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[FeaturestoreOnlineServingServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. 
If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class FeaturestoreOnlineServingServiceClient(metaclass=FeaturestoreOnlineServingServiceClientMeta): - """A service for serving online feature values.""" - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - FeaturestoreOnlineServingServiceClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - FeaturestoreOnlineServingServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> FeaturestoreOnlineServingServiceTransport: - """Returns the transport used by the client instance. - - Returns: - FeaturestoreOnlineServingServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def entity_type_path(project: str,location: str,featurestore: str,entity_type: str,) -> str: - """Returns a fully-qualified entity_type string.""" - return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, ) - - @staticmethod - def parse_entity_type_path(path: str) -> Dict[str,str]: - """Parses a entity_type path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)/entityTypes/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def 
common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, FeaturestoreOnlineServingServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the featurestore online serving service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, FeaturestoreOnlineServingServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. 
- if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, FeaturestoreOnlineServingServiceTransport): - # transport is a FeaturestoreOnlineServingServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def read_feature_values(self, - request: Union[featurestore_online_service.ReadFeatureValuesRequest, dict] = None, - *, - entity_type: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> featurestore_online_service.ReadFeatureValuesResponse: - r"""Reads Feature values of a specific entity of an - EntityType. For reading feature values of multiple - entities of an EntityType, please use - StreamingReadFeatureValues. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesRequest, dict]): - The request object. Request message for - [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. - entity_type (str): - Required. The resource name of the EntityType for the - entity being read. Value format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. - For example, for a machine learning model predicting - user clicks on a website, an EntityType ID could be - ``user``. - - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse: - Response message for - [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_online_service.ReadFeatureValuesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_online_service.ReadFeatureValuesRequest): - request = featurestore_online_service.ReadFeatureValuesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if entity_type is not None: - request.entity_type = entity_type - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.read_feature_values] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("entity_type", request.entity_type), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def streaming_read_feature_values(self, - request: Union[featurestore_online_service.StreamingReadFeatureValuesRequest, dict] = None, - *, - entity_type: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> Iterable[featurestore_online_service.ReadFeatureValuesResponse]: - r"""Reads Feature values for multiple entities. Depending - on their size, data for different entities may be broken - up across multiple responses. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.StreamingReadFeatureValuesRequest, dict]): - The request object. Request message for - [FeaturestoreOnlineServingService.StreamingFeatureValuesRead][]. - entity_type (str): - Required. The resource name of the entities' type. Value - format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. - For example, for a machine learning model predicting - user clicks on a website, an EntityType ID could be - ``user``. - - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - Iterable[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse]: - Response message for - [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([entity_type]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_online_service.StreamingReadFeatureValuesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_online_service.StreamingReadFeatureValuesRequest): - request = featurestore_online_service.StreamingReadFeatureValuesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if entity_type is not None: - request.entity_type = entity_type - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.streaming_read_feature_values] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("entity_type", request.entity_type), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! 
- """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "FeaturestoreOnlineServingServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/__init__.py deleted file mode 100644 index d1abcd0c43..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import FeaturestoreOnlineServingServiceTransport -from .grpc import FeaturestoreOnlineServingServiceGrpcTransport -from .grpc_asyncio import FeaturestoreOnlineServingServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. 
-_transport_registry = OrderedDict() # type: Dict[str, Type[FeaturestoreOnlineServingServiceTransport]] -_transport_registry['grpc'] = FeaturestoreOnlineServingServiceGrpcTransport -_transport_registry['grpc_asyncio'] = FeaturestoreOnlineServingServiceGrpcAsyncIOTransport - -__all__ = ( - 'FeaturestoreOnlineServingServiceTransport', - 'FeaturestoreOnlineServingServiceGrpcTransport', - 'FeaturestoreOnlineServingServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py deleted file mode 100644 index c6a4035198..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/base.py +++ /dev/null @@ -1,160 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import featurestore_online_service - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class FeaturestoreOnlineServingServiceTransport(abc.ABC): - """Abstract transport class for FeaturestoreOnlineServingService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. 
- scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
- self._wrapped_methods = { - self.read_feature_values: gapic_v1.method.wrap_method( - self.read_feature_values, - default_timeout=5.0, - client_info=client_info, - ), - self.streaming_read_feature_values: gapic_v1.method.wrap_method( - self.streaming_read_feature_values, - default_timeout=5.0, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! - """ - raise NotImplementedError() - - @property - def read_feature_values(self) -> Callable[ - [featurestore_online_service.ReadFeatureValuesRequest], - Union[ - featurestore_online_service.ReadFeatureValuesResponse, - Awaitable[featurestore_online_service.ReadFeatureValuesResponse] - ]]: - raise NotImplementedError() - - @property - def streaming_read_feature_values(self) -> Callable[ - [featurestore_online_service.StreamingReadFeatureValuesRequest], - Union[ - featurestore_online_service.ReadFeatureValuesResponse, - Awaitable[featurestore_online_service.ReadFeatureValuesResponse] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'FeaturestoreOnlineServingServiceTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py deleted file mode 100644 index 3eafa5b301..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc.py +++ /dev/null @@ -1,285 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1beta1.types import featurestore_online_service -from .base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO - - -class FeaturestoreOnlineServingServiceGrpcTransport(FeaturestoreOnlineServingServiceTransport): - """gRPC backend transport for FeaturestoreOnlineServingService. - - A service for serving online feature values. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. 
- ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def read_feature_values(self) -> Callable[ - [featurestore_online_service.ReadFeatureValuesRequest], - featurestore_online_service.ReadFeatureValuesResponse]: - r"""Return a callable for the read feature values method over gRPC. - - Reads Feature values of a specific entity of an - EntityType. For reading feature values of multiple - entities of an EntityType, please use - StreamingReadFeatureValues. - - Returns: - Callable[[~.ReadFeatureValuesRequest], - ~.ReadFeatureValuesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'read_feature_values' not in self._stubs: - self._stubs['read_feature_values'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService/ReadFeatureValues', - request_serializer=featurestore_online_service.ReadFeatureValuesRequest.serialize, - response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, - ) - return self._stubs['read_feature_values'] - - @property - def streaming_read_feature_values(self) -> Callable[ - [featurestore_online_service.StreamingReadFeatureValuesRequest], - featurestore_online_service.ReadFeatureValuesResponse]: - r"""Return a callable for the streaming read feature values method over gRPC. - - Reads Feature values for multiple entities. Depending - on their size, data for different entities may be broken - up across multiple responses. - - Returns: - Callable[[~.StreamingReadFeatureValuesRequest], - ~.ReadFeatureValuesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'streaming_read_feature_values' not in self._stubs: - self._stubs['streaming_read_feature_values'] = self.grpc_channel.unary_stream( - '/google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService/StreamingReadFeatureValues', - request_serializer=featurestore_online_service.StreamingReadFeatureValuesRequest.serialize, - response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, - ) - return self._stubs['streaming_read_feature_values'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'FeaturestoreOnlineServingServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py deleted file mode 100644 index f96a30f70e..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,289 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import featurestore_online_service -from .base import FeaturestoreOnlineServingServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import FeaturestoreOnlineServingServiceGrpcTransport - - -class FeaturestoreOnlineServingServiceGrpcAsyncIOTransport(FeaturestoreOnlineServingServiceTransport): - """gRPC AsyncIO backend transport for FeaturestoreOnlineServingService. - - A service for serving online feature values. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. 
- google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. 
This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def read_feature_values(self) -> Callable[ - [featurestore_online_service.ReadFeatureValuesRequest], - Awaitable[featurestore_online_service.ReadFeatureValuesResponse]]: - r"""Return a callable for the read feature values method over gRPC. - - Reads Feature values of a specific entity of an - EntityType. For reading feature values of multiple - entities of an EntityType, please use - StreamingReadFeatureValues. - - Returns: - Callable[[~.ReadFeatureValuesRequest], - Awaitable[~.ReadFeatureValuesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'read_feature_values' not in self._stubs: - self._stubs['read_feature_values'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService/ReadFeatureValues', - request_serializer=featurestore_online_service.ReadFeatureValuesRequest.serialize, - response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, - ) - return self._stubs['read_feature_values'] - - @property - def streaming_read_feature_values(self) -> Callable[ - [featurestore_online_service.StreamingReadFeatureValuesRequest], - Awaitable[featurestore_online_service.ReadFeatureValuesResponse]]: - r"""Return a callable for the streaming read feature values method over gRPC. - - Reads Feature values for multiple entities. 
Depending - on their size, data for different entities may be broken - up across multiple responses. - - Returns: - Callable[[~.StreamingReadFeatureValuesRequest], - Awaitable[~.ReadFeatureValuesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'streaming_read_feature_values' not in self._stubs: - self._stubs['streaming_read_feature_values'] = self.grpc_channel.unary_stream( - '/google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService/StreamingReadFeatureValues', - request_serializer=featurestore_online_service.StreamingReadFeatureValuesRequest.serialize, - response_deserializer=featurestore_online_service.ReadFeatureValuesResponse.deserialize, - ) - return self._stubs['streaming_read_feature_values'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'FeaturestoreOnlineServingServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/__init__.py deleted file mode 100644 index 81716ce8fe..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import FeaturestoreServiceClient -from .async_client import FeaturestoreServiceAsyncClient - -__all__ = ( - 'FeaturestoreServiceClient', - 'FeaturestoreServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py deleted file mode 100644 index dd65d7bb15..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/async_client.py +++ /dev/null @@ -1,2224 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.featurestore_service import pagers -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import entity_type -from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type -from google.cloud.aiplatform_v1beta1.types import feature -from google.cloud.aiplatform_v1beta1.types import feature as gca_feature -from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats -from google.cloud.aiplatform_v1beta1.types import featurestore -from google.cloud.aiplatform_v1beta1.types import featurestore as gca_featurestore -from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring -from google.cloud.aiplatform_v1beta1.types import featurestore_service -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport 
-from .client import FeaturestoreServiceClient - - -class FeaturestoreServiceAsyncClient: - """The service that handles CRUD and List for resources for - Featurestore. - """ - - _client: FeaturestoreServiceClient - - DEFAULT_ENDPOINT = FeaturestoreServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = FeaturestoreServiceClient.DEFAULT_MTLS_ENDPOINT - - entity_type_path = staticmethod(FeaturestoreServiceClient.entity_type_path) - parse_entity_type_path = staticmethod(FeaturestoreServiceClient.parse_entity_type_path) - feature_path = staticmethod(FeaturestoreServiceClient.feature_path) - parse_feature_path = staticmethod(FeaturestoreServiceClient.parse_feature_path) - featurestore_path = staticmethod(FeaturestoreServiceClient.featurestore_path) - parse_featurestore_path = staticmethod(FeaturestoreServiceClient.parse_featurestore_path) - common_billing_account_path = staticmethod(FeaturestoreServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(FeaturestoreServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(FeaturestoreServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(FeaturestoreServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(FeaturestoreServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(FeaturestoreServiceClient.parse_common_organization_path) - common_project_path = staticmethod(FeaturestoreServiceClient.common_project_path) - parse_common_project_path = staticmethod(FeaturestoreServiceClient.parse_common_project_path) - common_location_path = staticmethod(FeaturestoreServiceClient.common_location_path) - parse_common_location_path = staticmethod(FeaturestoreServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. 
- - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - FeaturestoreServiceAsyncClient: The constructed client. - """ - return FeaturestoreServiceClient.from_service_account_info.__func__(FeaturestoreServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - FeaturestoreServiceAsyncClient: The constructed client. - """ - return FeaturestoreServiceClient.from_service_account_file.__func__(FeaturestoreServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> FeaturestoreServiceTransport: - """Returns the transport used by the client instance. - - Returns: - FeaturestoreServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(FeaturestoreServiceClient).get_transport_class, type(FeaturestoreServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, FeaturestoreServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the featurestore service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.FeaturestoreServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. 
- """ - self._client = FeaturestoreServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_featurestore(self, - request: Union[featurestore_service.CreateFeaturestoreRequest, dict] = None, - *, - parent: str = None, - featurestore: gca_featurestore.Featurestore = None, - featurestore_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a new Featurestore in a given project and - location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateFeaturestoreRequest, dict]): - The request object. Request message for - [FeaturestoreService.CreateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeaturestore]. - parent (:class:`str`): - Required. The resource name of the Location to create - Featurestores. Format: - ``projects/{project}/locations/{location}'`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - featurestore (:class:`google.cloud.aiplatform_v1beta1.types.Featurestore`): - Required. The Featurestore to create. - This corresponds to the ``featurestore`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - featurestore_id (:class:`str`): - Required. The ID to use for this Featurestore, which - will become the final component of the Featurestore's - resource name. - - This value may be up to 60 characters, and valid - characters are ``[a-z0-9_]``. The first character cannot - be a number. - - The value must be unique within the project and - location. - - This corresponds to the ``featurestore_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing, - storing, and serving ML features. The Featurestore is - a top-level container for your features and their - values. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, featurestore, featurestore_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.CreateFeaturestoreRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if featurestore is not None: - request.featurestore = featurestore - if featurestore_id is not None: - request.featurestore_id = featurestore_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_featurestore, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_featurestore.Featurestore, - metadata_type=featurestore_service.CreateFeaturestoreOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_featurestore(self, - request: Union[featurestore_service.GetFeaturestoreRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> featurestore.Featurestore: - r"""Gets details of a single Featurestore. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetFeaturestoreRequest, dict]): - The request object. Request message for - [FeaturestoreService.GetFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeaturestore]. - name (:class:`str`): - Required. The name of the - Featurestore resource. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Featurestore: - Vertex AI Feature Store provides a - centralized repository for organizing, - storing, and serving ML features. The - Featurestore is a top-level container - for your features and their values. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.GetFeaturestoreRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_featurestore, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_featurestores(self, - request: Union[featurestore_service.ListFeaturestoresRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListFeaturestoresAsyncPager: - r"""Lists Featurestores in a given project and location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest, dict]): - The request object. Request message for - [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]. - parent (:class:`str`): - Required. The resource name of the Location to list - Featurestores. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturestoresAsyncPager: - Response message for - [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.ListFeaturestoresRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_featurestores, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. 
- response = pagers.ListFeaturestoresAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_featurestore(self, - request: Union[featurestore_service.UpdateFeaturestoreRequest, dict] = None, - *, - featurestore: gca_featurestore.Featurestore = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Updates the parameters of a single Featurestore. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateFeaturestoreRequest, dict]): - The request object. Request message for - [FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeaturestore]. - featurestore (:class:`google.cloud.aiplatform_v1beta1.types.Featurestore`): - Required. The Featurestore's ``name`` field is used to - identify the Featurestore to be updated. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``featurestore`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Field mask is used to specify the fields to be - overwritten in the Featurestore resource by the update. - The fields specified in the update_mask are relative to - the resource, not the full request. A field will be - overwritten if it is in the mask. If the user does not - provide a mask then only the non-empty fields present in - the request will be overwritten. Set the update_mask to - ``*`` to override all fields. - - Updatable fields: - - - ``labels`` - - ``online_serving_config.fixed_node_count`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing, - storing, and serving ML features. The Featurestore is - a top-level container for your features and their - values. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([featurestore, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.UpdateFeaturestoreRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if featurestore is not None: - request.featurestore = featurestore - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_featurestore, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("featurestore.name", request.featurestore.name), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_featurestore.Featurestore, - metadata_type=featurestore_service.UpdateFeaturestoreOperationMetadata, - ) - - # Done; return the response. - return response - - async def delete_featurestore(self, - request: Union[featurestore_service.DeleteFeaturestoreRequest, dict] = None, - *, - name: str = None, - force: bool = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a single Featurestore. The Featurestore must not contain - any EntityTypes or ``force`` must be set to true for the request - to succeed. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteFeaturestoreRequest, dict]): - The request object. Request message for - [FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeaturestore]. - name (:class:`str`): - Required. The name of the Featurestore to be deleted. - Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - force (:class:`bool`): - If set to true, any EntityTypes and - Features for this Featurestore will also - be deleted. (Otherwise, the request will - only work if the Featurestore has no - EntityTypes.) - - This corresponds to the ``force`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, force]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.DeleteFeaturestoreRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if force is not None: - request.force = force - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_featurestore, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
- response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def create_entity_type(self, - request: Union[featurestore_service.CreateEntityTypeRequest, dict] = None, - *, - parent: str = None, - entity_type: gca_entity_type.EntityType = None, - entity_type_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a new EntityType in a given Featurestore. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateEntityTypeRequest, dict]): - The request object. Request message for - [FeaturestoreService.CreateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateEntityType]. - parent (:class:`str`): - Required. The resource name of the Featurestore to - create EntityTypes. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - entity_type (:class:`google.cloud.aiplatform_v1beta1.types.EntityType`): - The EntityType to create. - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - entity_type_id (:class:`str`): - Required. The ID to use for the EntityType, which will - become the final component of the EntityType's resource - name. - - This value may be up to 60 characters, and valid - characters are ``[a-z0-9_]``. The first character cannot - be a number. - - The value must be unique within a featurestore. - - This corresponds to the ``entity_type_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.EntityType` An entity type is a type of object in a system that needs to be modeled and - have stored information about. For example, driver is - an entity type, and driver0 is an instance of an - entity type driver. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, entity_type, entity_type_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.CreateEntityTypeRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if entity_type is not None: - request.entity_type = entity_type - if entity_type_id is not None: - request.entity_type_id = entity_type_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_entity_type, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_entity_type.EntityType, - metadata_type=featurestore_service.CreateEntityTypeOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_entity_type(self, - request: Union[featurestore_service.GetEntityTypeRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> entity_type.EntityType: - r"""Gets details of a single EntityType. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetEntityTypeRequest, dict]): - The request object. Request message for - [FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetEntityType]. - name (:class:`str`): - Required. The name of the EntityType resource. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.EntityType: - An entity type is a type of object in - a system that needs to be modeled and - have stored information about. For - example, driver is an entity type, and - driver0 is an instance of an entity type - driver. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.GetEntityTypeRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_entity_type, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_entity_types(self, - request: Union[featurestore_service.ListEntityTypesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListEntityTypesAsyncPager: - r"""Lists EntityTypes in a given Featurestore. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest, dict]): - The request object. Request message for - [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. - parent (:class:`str`): - Required. The resource name of the Featurestore to list - EntityTypes. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListEntityTypesAsyncPager: - Response message for - [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.ListEntityTypesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_entity_types, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. 
- response = pagers.ListEntityTypesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_entity_type(self, - request: Union[featurestore_service.UpdateEntityTypeRequest, dict] = None, - *, - entity_type: gca_entity_type.EntityType = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_entity_type.EntityType: - r"""Updates the parameters of a single EntityType. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateEntityTypeRequest, dict]): - The request object. Request message for - [FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateEntityType]. - entity_type (:class:`google.cloud.aiplatform_v1beta1.types.EntityType`): - Required. The EntityType's ``name`` field is used to - identify the EntityType to be updated. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Field mask is used to specify the fields to be - overwritten in the EntityType resource by the update. - The fields specified in the update_mask are relative to - the resource, not the full request. A field will be - overwritten if it is in the mask. If the user does not - provide a mask then only the non-empty fields present in - the request will be overwritten. Set the update_mask to - ``*`` to override all fields. 
- - Updatable fields: - - - ``description`` - - ``labels`` - - ``monitoring_config.snapshot_analysis.disabled`` - - ``monitoring_config.snapshot_analysis.monitoring_interval`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.EntityType: - An entity type is a type of object in - a system that needs to be modeled and - have stored information about. For - example, driver is an entity type, and - driver0 is an instance of an entity type - driver. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.UpdateEntityTypeRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if entity_type is not None: - request.entity_type = entity_type - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_entity_type, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("entity_type.name", request.entity_type.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_entity_type(self, - request: Union[featurestore_service.DeleteEntityTypeRequest, dict] = None, - *, - name: str = None, - force: bool = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a single EntityType. The EntityType must not have any - Features or ``force`` must be set to true for the request to - succeed. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteEntityTypeRequest, dict]): - The request object. Request message for - [FeaturestoreService.DeleteEntityTypes][]. - name (:class:`str`): - Required. The name of the EntityType to be deleted. - Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - force (:class:`bool`): - If set to true, any Features for this - EntityType will also be deleted. - (Otherwise, the request will only work - if the EntityType has no Features.) - - This corresponds to the ``force`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. 
- - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, force]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.DeleteEntityTypeRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if force is not None: - request.force = force - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_entity_type, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
- return response - - async def create_feature(self, - request: Union[featurestore_service.CreateFeatureRequest, dict] = None, - *, - parent: str = None, - feature: gca_feature.Feature = None, - feature_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a new Feature in a given EntityType. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest, dict]): - The request object. Request message for - [FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeature]. - parent (:class:`str`): - Required. The resource name of the EntityType to create - a Feature. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - feature (:class:`google.cloud.aiplatform_v1beta1.types.Feature`): - Required. The Feature to create. - This corresponds to the ``feature`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - feature_id (:class:`str`): - Required. The ID to use for the Feature, which will - become the final component of the Feature's resource - name. - - This value may be up to 60 characters, and valid - characters are ``[a-z0-9_]``. The first character cannot - be a number. - - The value must be unique within an EntityType. - - This corresponds to the ``feature_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Feature` Feature Metadata information that describes an attribute of an entity type. - For example, apple is an entity type, and color is a - feature that describes apple. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, feature, feature_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.CreateFeatureRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if feature is not None: - request.feature = feature - if feature_id is not None: - request.feature_id = feature_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_feature, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_feature.Feature, - metadata_type=featurestore_service.CreateFeatureOperationMetadata, - ) - - # Done; return the response. 
- return response - - async def batch_create_features(self, - request: Union[featurestore_service.BatchCreateFeaturesRequest, dict] = None, - *, - parent: str = None, - requests: Sequence[featurestore_service.CreateFeatureRequest] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a batch of Features in a given EntityType. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesRequest, dict]): - The request object. Request message for - [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. - parent (:class:`str`): - Required. The resource name of the EntityType to create - the batch of Features under. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - requests (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest]`): - Required. The request message specifying the Features to - create. All Features must be created under the same - parent EntityType. The ``parent`` field in each child - request message can be omitted. If ``parent`` is set in - a child request, then the value must match the - ``parent`` value in this request message. - - This corresponds to the ``requests`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. 
- - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesResponse` - Response message for - [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, requests]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.BatchCreateFeaturesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if requests: - request.requests.extend(requests) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.batch_create_features, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - featurestore_service.BatchCreateFeaturesResponse, - metadata_type=featurestore_service.BatchCreateFeaturesOperationMetadata, - ) - - # Done; return the response. 
- return response - - async def get_feature(self, - request: Union[featurestore_service.GetFeatureRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> feature.Feature: - r"""Gets details of a single Feature. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetFeatureRequest, dict]): - The request object. Request message for - [FeaturestoreService.GetFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeature]. - name (:class:`str`): - Required. The name of the Feature resource. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Feature: - Feature Metadata information that - describes an attribute of an entity - type. For example, apple is an entity - type, and color is a feature that - describes apple. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.GetFeatureRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_feature, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_features(self, - request: Union[featurestore_service.ListFeaturesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListFeaturesAsyncPager: - r"""Lists Features in a given EntityType. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest, dict]): - The request object. Request message for - [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]. - parent (:class:`str`): - Required. The resource name of the Location to list - Features. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturesAsyncPager: - Response message for - [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.ListFeaturesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_features, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListFeaturesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def update_feature(self, - request: Union[featurestore_service.UpdateFeatureRequest, dict] = None, - *, - feature: gca_feature.Feature = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_feature.Feature: - r"""Updates the parameters of a single Feature. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateFeatureRequest, dict]): - The request object. Request message for - [FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeature]. - feature (:class:`google.cloud.aiplatform_v1beta1.types.Feature`): - Required. The Feature's ``name`` field is used to - identify the Feature to be updated. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` - - This corresponds to the ``feature`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Field mask is used to specify the fields to be - overwritten in the Features resource by the update. The - fields specified in the update_mask are relative to the - resource, not the full request. A field will be - overwritten if it is in the mask. If the user does not - provide a mask then only the non-empty fields present in - the request will be overwritten. Set the update_mask to - ``*`` to override all fields. - - Updatable fields: - - - ``description`` - - ``labels`` - - ``monitoring_config.snapshot_analysis.disabled`` - - ``monitoring_config.snapshot_analysis.monitoring_interval`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Feature: - Feature Metadata information that - describes an attribute of an entity - type. For example, apple is an entity - type, and color is a feature that - describes apple. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([feature, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.UpdateFeatureRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if feature is not None: - request.feature = feature - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_feature, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("feature.name", request.feature.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def delete_feature(self, - request: Union[featurestore_service.DeleteFeatureRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a single Feature. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteFeatureRequest, dict]): - The request object. Request message for - [FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeature]. - name (:class:`str`): - Required. The name of the Features to be deleted. - Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.DeleteFeatureRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_feature, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def import_feature_values(self, - request: Union[featurestore_service.ImportFeatureValuesRequest, dict] = None, - *, - entity_type: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Imports Feature values into the Featurestore from a - source storage. - The progress of the import is tracked by the returned - operation. The imported features are guaranteed to be - visible to subsequent read operations after the - operation is marked as successfully done. - If an import operation fails, the Feature values - returned from reads and exports may be inconsistent. 
If - consistency is required, the caller must retry the same - import request again and wait till the new operation - returned is marked as successfully done. - There are also scenarios where the caller can cause - inconsistency. - - Source data for import contains multiple distinct - Feature values for the same entity ID and timestamp. - - Source is modified during an import. This includes - adding, updating, or removing source data and/or - metadata. Examples of updating metadata include but are - not limited to changing storage location, storage class, - or retention policy. - - Online serving cluster is under-provisioned. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesRequest, dict]): - The request object. Request message for - [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. - entity_type (:class:`str`): - Required. The resource name of the EntityType grouping - the Features for which values are being imported. - Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`` - - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesResponse` - Response message for - [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.ImportFeatureValuesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if entity_type is not None: - request.entity_type = entity_type - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.import_feature_values, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("entity_type", request.entity_type), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - featurestore_service.ImportFeatureValuesResponse, - metadata_type=featurestore_service.ImportFeatureValuesOperationMetadata, - ) - - # Done; return the response. - return response - - async def batch_read_feature_values(self, - request: Union[featurestore_service.BatchReadFeatureValuesRequest, dict] = None, - *, - featurestore: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Batch reads Feature values from a Featurestore. 
- This API enables batch reading Feature values, where - each read instance in the batch may read Feature values - of entities from one or more EntityTypes. Point-in-time - correctness is guaranteed for Feature values of each - read instance as of each instance's read timestamp. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest, dict]): - The request object. Request message for - [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. - featurestore (:class:`str`): - Required. The resource name of the Featurestore from - which to query Feature values. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``featurestore`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesResponse` - Response message for - [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([featurestore]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.BatchReadFeatureValuesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if featurestore is not None: - request.featurestore = featurestore - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.batch_read_feature_values, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("featurestore", request.featurestore), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - featurestore_service.BatchReadFeatureValuesResponse, - metadata_type=featurestore_service.BatchReadFeatureValuesOperationMetadata, - ) - - # Done; return the response. - return response - - async def export_feature_values(self, - request: Union[featurestore_service.ExportFeatureValuesRequest, dict] = None, - *, - entity_type: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Exports Feature values from all the entities of a - target EntityType. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest, dict]): - The request object. 
Request message for - [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. - entity_type (:class:`str`): - Required. The resource name of the EntityType from which - to export Feature values. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesResponse` - Response message for - [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.ExportFeatureValuesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if entity_type is not None: - request.entity_type = entity_type - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.export_feature_values, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("entity_type", request.entity_type), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - featurestore_service.ExportFeatureValuesResponse, - metadata_type=featurestore_service.ExportFeatureValuesOperationMetadata, - ) - - # Done; return the response. - return response - - async def search_features(self, - request: Union[featurestore_service.SearchFeaturesRequest, dict] = None, - *, - location: str = None, - query: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.SearchFeaturesAsyncPager: - r"""Searches Features matching a query in a given - project. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest, dict]): - The request object. Request message for - [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. - location (:class:`str`): - Required. The resource name of the Location to search - Features. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``location`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - query (:class:`str`): - Query string that is a conjunction of field-restricted - queries and/or field-restricted filters. - Field-restricted queries and filters can be combined - using ``AND`` to form a conjunction. - - A field query is in the form FIELD:QUERY. 
This - implicitly checks if QUERY exists as a substring within - Feature's FIELD. The QUERY and the FIELD are converted - to a sequence of words (i.e. tokens) for comparison. - This is done by: - - - Removing leading/trailing whitespace and tokenizing - the search value. Characters that are not one of - alphanumeric ``[a-zA-Z0-9]``, underscore ``_``, or - asterisk ``*`` are treated as delimiters for tokens. - ``*`` is treated as a wildcard that matches - characters within a token. - - Ignoring case. - - Prepending an asterisk to the first and appending an - asterisk to the last token in QUERY. - - A QUERY must be either a singular token or a phrase. A - phrase is one or multiple words enclosed in double - quotation marks ("). With phrases, the order of the - words is important. Words in the phrase must be matching - in order and consecutively. - - Supported FIELDs for field-restricted queries: - - - ``feature_id`` - - ``description`` - - ``entity_type_id`` - - Examples: - - - ``feature_id: foo`` --> Matches a Feature with ID - containing the substring ``foo`` (eg. ``foo``, - ``foofeature``, ``barfoo``). - - ``feature_id: foo*feature`` --> Matches a Feature - with ID containing the substring ``foo*feature`` (eg. - ``foobarfeature``). - - ``feature_id: foo AND description: bar`` --> Matches - a Feature with ID containing the substring ``foo`` - and description containing the substring ``bar``. - - Besides field queries, the following exact-match filters - are supported. The exact-match filters do not support - wildcards. Unlike field-restricted queries, exact-match - filters are case-sensitive. - - - ``feature_id``: Supports = comparisons. - - ``description``: Supports = comparisons. Multi-token - filters should be enclosed in quotes. - - ``entity_type_id``: Supports = comparisons. - - ``value_type``: Supports = and != comparisons. - - ``labels``: Supports key-value equality as well as - key presence. - - ``featurestore_id``: Supports = comparisons. 
- - Examples: - - - ``description = "foo bar"`` --> Any Feature with - description exactly equal to ``foo bar`` - - ``value_type = DOUBLE`` --> Features whose type is - DOUBLE. - - ``labels.active = yes AND labels.env = prod`` --> - Features having both (active: yes) and (env: prod) - labels. - - ``labels.env: *`` --> Any Feature which has a label - with ``env`` as the key. - - This corresponds to the ``query`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.SearchFeaturesAsyncPager: - Response message for - [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([location, query]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = featurestore_service.SearchFeaturesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if location is not None: - request.location = location - if query is not None: - request.query = query - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.search_features, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("location", request.location), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.SearchFeaturesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "FeaturestoreServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py deleted file mode 100644 index 80a090ad2a..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/client.py +++ /dev/null @@ -1,2440 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.featurestore_service import pagers -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import entity_type -from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type -from google.cloud.aiplatform_v1beta1.types import feature -from google.cloud.aiplatform_v1beta1.types import feature as gca_feature -from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats -from google.cloud.aiplatform_v1beta1.types import featurestore -from 
google.cloud.aiplatform_v1beta1.types import featurestore as gca_featurestore -from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring -from google.cloud.aiplatform_v1beta1.types import featurestore_service -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import FeaturestoreServiceGrpcTransport -from .transports.grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport - - -class FeaturestoreServiceClientMeta(type): - """Metaclass for the FeaturestoreService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[FeaturestoreServiceTransport]] - _transport_registry["grpc"] = FeaturestoreServiceGrpcTransport - _transport_registry["grpc_asyncio"] = FeaturestoreServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[FeaturestoreServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class FeaturestoreServiceClient(metaclass=FeaturestoreServiceClientMeta): - """The service that handles CRUD and List for resources for - Featurestore. 
- """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - FeaturestoreServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - FeaturestoreServiceClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> FeaturestoreServiceTransport: - """Returns the transport used by the client instance. - - Returns: - FeaturestoreServiceTransport: The transport used by the client - instance. - """ - return self._transport - - @staticmethod - def entity_type_path(project: str,location: str,featurestore: str,entity_type: str,) -> str: - """Returns a fully-qualified entity_type string.""" - return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, ) - - @staticmethod - def parse_entity_type_path(path: str) -> Dict[str,str]: - """Parses a entity_type path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)/entityTypes/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def feature_path(project: str,location: str,featurestore: str,entity_type: str,feature: str,) -> str: - """Returns a fully-qualified feature string.""" - return "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, feature=feature, ) - - @staticmethod - def parse_feature_path(path: str) -> Dict[str,str]: - """Parses a feature path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)/entityTypes/(?P.+?)/features/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def featurestore_path(project: str,location: str,featurestore: str,) -> str: - """Returns a fully-qualified featurestore string.""" - return 
"projects/{project}/locations/{location}/featurestores/{featurestore}".format(project=project, location=location, featurestore=featurestore, ) - - @staticmethod - def parse_featurestore_path(path: str) -> Dict[str,str]: - """Parses a featurestore path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/featurestores/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component 
segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, FeaturestoreServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the featurestore service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, FeaturestoreServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. 
- (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. 
- if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, FeaturestoreServiceTransport): - # transport is a FeaturestoreServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def create_featurestore(self, - request: Union[featurestore_service.CreateFeaturestoreRequest, dict] = None, - *, - parent: str = None, - featurestore: gca_featurestore.Featurestore = None, - featurestore_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates a new Featurestore in a given project and - location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateFeaturestoreRequest, dict]): - The request object. Request message for - [FeaturestoreService.CreateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeaturestore]. - parent (str): - Required. The resource name of the Location to create - Featurestores. Format: - ``projects/{project}/locations/{location}'`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - featurestore (google.cloud.aiplatform_v1beta1.types.Featurestore): - Required. The Featurestore to create. - This corresponds to the ``featurestore`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - featurestore_id (str): - Required. The ID to use for this Featurestore, which - will become the final component of the Featurestore's - resource name. - - This value may be up to 60 characters, and valid - characters are ``[a-z0-9_]``. The first character cannot - be a number. - - The value must be unique within the project and - location. 
- - This corresponds to the ``featurestore_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing, - storing, and serving ML features. The Featurestore is - a top-level container for your features and their - values. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, featurestore, featurestore_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.CreateFeaturestoreRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.CreateFeaturestoreRequest): - request = featurestore_service.CreateFeaturestoreRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if featurestore is not None: - request.featurestore = featurestore - if featurestore_id is not None: - request.featurestore_id = featurestore_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.create_featurestore] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_featurestore.Featurestore, - metadata_type=featurestore_service.CreateFeaturestoreOperationMetadata, - ) - - # Done; return the response. - return response - - def get_featurestore(self, - request: Union[featurestore_service.GetFeaturestoreRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> featurestore.Featurestore: - r"""Gets details of a single Featurestore. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetFeaturestoreRequest, dict]): - The request object. Request message for - [FeaturestoreService.GetFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeaturestore]. - name (str): - Required. The name of the - Featurestore resource. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Featurestore: - Vertex AI Feature Store provides a - centralized repository for organizing, - storing, and serving ML features. The - Featurestore is a top-level container - for your features and their values. 
- - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.GetFeaturestoreRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.GetFeaturestoreRequest): - request = featurestore_service.GetFeaturestoreRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_featurestore] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_featurestores(self, - request: Union[featurestore_service.ListFeaturestoresRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListFeaturestoresPager: - r"""Lists Featurestores in a given project and location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest, dict]): - The request object. 
Request message for - [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]. - parent (str): - Required. The resource name of the Location to list - Featurestores. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturestoresPager: - Response message for - [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.ListFeaturestoresRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.ListFeaturestoresRequest): - request = featurestore_service.ListFeaturestoresRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_featurestores] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListFeaturestoresPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_featurestore(self, - request: Union[featurestore_service.UpdateFeaturestoreRequest, dict] = None, - *, - featurestore: gca_featurestore.Featurestore = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Updates the parameters of a single Featurestore. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateFeaturestoreRequest, dict]): - The request object. Request message for - [FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeaturestore]. - featurestore (google.cloud.aiplatform_v1beta1.types.Featurestore): - Required. The Featurestore's ``name`` field is used to - identify the Featurestore to be updated. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``featurestore`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- update_mask (google.protobuf.field_mask_pb2.FieldMask): - Field mask is used to specify the fields to be - overwritten in the Featurestore resource by the update. - The fields specified in the update_mask are relative to - the resource, not the full request. A field will be - overwritten if it is in the mask. If the user does not - provide a mask then only the non-empty fields present in - the request will be overwritten. Set the update_mask to - ``*`` to override all fields. - - Updatable fields: - - - ``labels`` - - ``online_serving_config.fixed_node_count`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Featurestore` Vertex AI Feature Store provides a centralized repository for organizing, - storing, and serving ML features. The Featurestore is - a top-level container for your features and their - values. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([featurestore, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.UpdateFeaturestoreRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, featurestore_service.UpdateFeaturestoreRequest): - request = featurestore_service.UpdateFeaturestoreRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if featurestore is not None: - request.featurestore = featurestore - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_featurestore] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("featurestore.name", request.featurestore.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_featurestore.Featurestore, - metadata_type=featurestore_service.UpdateFeaturestoreOperationMetadata, - ) - - # Done; return the response. - return response - - def delete_featurestore(self, - request: Union[featurestore_service.DeleteFeaturestoreRequest, dict] = None, - *, - name: str = None, - force: bool = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a single Featurestore. The Featurestore must not contain - any EntityTypes or ``force`` must be set to true for the request - to succeed. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteFeaturestoreRequest, dict]): - The request object. Request message for - [FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeaturestore]. - name (str): - Required. The name of the Featurestore to be deleted. 
- Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - force (bool): - If set to true, any EntityTypes and - Features for this Featurestore will also - be deleted. (Otherwise, the request will - only work if the Featurestore has no - EntityTypes.) - - This corresponds to the ``force`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, force]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.DeleteFeaturestoreRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, featurestore_service.DeleteFeaturestoreRequest): - request = featurestore_service.DeleteFeaturestoreRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if force is not None: - request.force = force - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_featurestore] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def create_entity_type(self, - request: Union[featurestore_service.CreateEntityTypeRequest, dict] = None, - *, - parent: str = None, - entity_type: gca_entity_type.EntityType = None, - entity_type_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates a new EntityType in a given Featurestore. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateEntityTypeRequest, dict]): - The request object. Request message for - [FeaturestoreService.CreateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateEntityType]. - parent (str): - Required. The resource name of the Featurestore to - create EntityTypes. 
Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - entity_type (google.cloud.aiplatform_v1beta1.types.EntityType): - The EntityType to create. - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - entity_type_id (str): - Required. The ID to use for the EntityType, which will - become the final component of the EntityType's resource - name. - - This value may be up to 60 characters, and valid - characters are ``[a-z0-9_]``. The first character cannot - be a number. - - The value must be unique within a featurestore. - - This corresponds to the ``entity_type_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.EntityType` An entity type is a type of object in a system that needs to be modeled and - have stored information about. For example, driver is - an entity type, and driver0 is an instance of an - entity type driver. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, entity_type, entity_type_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.CreateEntityTypeRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.CreateEntityTypeRequest): - request = featurestore_service.CreateEntityTypeRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if entity_type is not None: - request.entity_type = entity_type - if entity_type_id is not None: - request.entity_type_id = entity_type_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_entity_type] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_entity_type.EntityType, - metadata_type=featurestore_service.CreateEntityTypeOperationMetadata, - ) - - # Done; return the response. - return response - - def get_entity_type(self, - request: Union[featurestore_service.GetEntityTypeRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> entity_type.EntityType: - r"""Gets details of a single EntityType. 
- - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetEntityTypeRequest, dict]): - The request object. Request message for - [FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetEntityType]. - name (str): - Required. The name of the EntityType resource. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.EntityType: - An entity type is a type of object in - a system that needs to be modeled and - have stored information about. For - example, driver is an entity type, and - driver0 is an instance of an entity type - driver. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.GetEntityTypeRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.GetEntityTypeRequest): - request = featurestore_service.GetEntityTypeRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_entity_type] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_entity_types(self, - request: Union[featurestore_service.ListEntityTypesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListEntityTypesPager: - r"""Lists EntityTypes in a given Featurestore. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest, dict]): - The request object. Request message for - [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. - parent (str): - Required. The resource name of the Featurestore to list - EntityTypes. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListEntityTypesPager: - Response message for - [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. 
- - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.ListEntityTypesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.ListEntityTypesRequest): - request = featurestore_service.ListEntityTypesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_entity_types] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListEntityTypesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def update_entity_type(self, - request: Union[featurestore_service.UpdateEntityTypeRequest, dict] = None, - *, - entity_type: gca_entity_type.EntityType = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_entity_type.EntityType: - r"""Updates the parameters of a single EntityType. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateEntityTypeRequest, dict]): - The request object. Request message for - [FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateEntityType]. - entity_type (google.cloud.aiplatform_v1beta1.types.EntityType): - Required. The EntityType's ``name`` field is used to - identify the EntityType to be updated. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Field mask is used to specify the fields to be - overwritten in the EntityType resource by the update. - The fields specified in the update_mask are relative to - the resource, not the full request. A field will be - overwritten if it is in the mask. If the user does not - provide a mask then only the non-empty fields present in - the request will be overwritten. Set the update_mask to - ``*`` to override all fields. - - Updatable fields: - - - ``description`` - - ``labels`` - - ``monitoring_config.snapshot_analysis.disabled`` - - ``monitoring_config.snapshot_analysis.monitoring_interval`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.EntityType: - An entity type is a type of object in - a system that needs to be modeled and - have stored information about. For - example, driver is an entity type, and - driver0 is an instance of an entity type - driver. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.UpdateEntityTypeRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.UpdateEntityTypeRequest): - request = featurestore_service.UpdateEntityTypeRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if entity_type is not None: - request.entity_type = entity_type - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_entity_type] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("entity_type.name", request.entity_type.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def delete_entity_type(self, - request: Union[featurestore_service.DeleteEntityTypeRequest, dict] = None, - *, - name: str = None, - force: bool = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a single EntityType. The EntityType must not have any - Features or ``force`` must be set to true for the request to - succeed. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteEntityTypeRequest, dict]): - The request object. Request message for - [FeaturestoreService.DeleteEntityTypes][]. - name (str): - Required. The name of the EntityType to be deleted. - Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - force (bool): - If set to true, any Features for this - EntityType will also be deleted. - (Otherwise, the request will only work - if the EntityType has no Features.) - - This corresponds to the ``force`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. 
For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, force]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.DeleteEntityTypeRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.DeleteEntityTypeRequest): - request = featurestore_service.DeleteEntityTypeRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if force is not None: - request.force = force - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_entity_type] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
- return response - - def create_feature(self, - request: Union[featurestore_service.CreateFeatureRequest, dict] = None, - *, - parent: str = None, - feature: gca_feature.Feature = None, - feature_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates a new Feature in a given EntityType. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest, dict]): - The request object. Request message for - [FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeature]. - parent (str): - Required. The resource name of the EntityType to create - a Feature. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - feature (google.cloud.aiplatform_v1beta1.types.Feature): - Required. The Feature to create. - This corresponds to the ``feature`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - feature_id (str): - Required. The ID to use for the Feature, which will - become the final component of the Feature's resource - name. - - This value may be up to 60 characters, and valid - characters are ``[a-z0-9_]``. The first character cannot - be a number. - - The value must be unique within an EntityType. - - This corresponds to the ``feature_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Feature` Feature Metadata information that describes an attribute of an entity type. - For example, apple is an entity type, and color is a - feature that describes apple. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, feature, feature_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.CreateFeatureRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.CreateFeatureRequest): - request = featurestore_service.CreateFeatureRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if feature is not None: - request.feature = feature - if feature_id is not None: - request.feature_id = feature_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_feature] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
- response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_feature.Feature, - metadata_type=featurestore_service.CreateFeatureOperationMetadata, - ) - - # Done; return the response. - return response - - def batch_create_features(self, - request: Union[featurestore_service.BatchCreateFeaturesRequest, dict] = None, - *, - parent: str = None, - requests: Sequence[featurestore_service.CreateFeatureRequest] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates a batch of Features in a given EntityType. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesRequest, dict]): - The request object. Request message for - [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. - parent (str): - Required. The resource name of the EntityType to create - the batch of Features under. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - requests (Sequence[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest]): - Required. The request message specifying the Features to - create. All Features must be created under the same - parent EntityType. The ``parent`` field in each child - request message can be omitted. If ``parent`` is set in - a child request, then the value must match the - ``parent`` value in this request message. - - This corresponds to the ``requests`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesResponse` - Response message for - [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, requests]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.BatchCreateFeaturesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.BatchCreateFeaturesRequest): - request = featurestore_service.BatchCreateFeaturesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if requests is not None: - request.requests = requests - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.batch_create_features] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
- response = gac_operation.from_gapic( - response, - self._transport.operations_client, - featurestore_service.BatchCreateFeaturesResponse, - metadata_type=featurestore_service.BatchCreateFeaturesOperationMetadata, - ) - - # Done; return the response. - return response - - def get_feature(self, - request: Union[featurestore_service.GetFeatureRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> feature.Feature: - r"""Gets details of a single Feature. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetFeatureRequest, dict]): - The request object. Request message for - [FeaturestoreService.GetFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeature]. - name (str): - Required. The name of the Feature resource. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Feature: - Feature Metadata information that - describes an attribute of an entity - type. For example, apple is an entity - type, and color is a feature that - describes apple. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.GetFeatureRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.GetFeatureRequest): - request = featurestore_service.GetFeatureRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_feature] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_features(self, - request: Union[featurestore_service.ListFeaturesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListFeaturesPager: - r"""Lists Features in a given EntityType. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest, dict]): - The request object. Request message for - [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]. - parent (str): - Required. The resource name of the Location to list - Features. 
Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturesPager: - Response message for - [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.ListFeaturesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.ListFeaturesRequest): - request = featurestore_service.ListFeaturesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_features] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListFeaturesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_feature(self, - request: Union[featurestore_service.UpdateFeatureRequest, dict] = None, - *, - feature: gca_feature.Feature = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_feature.Feature: - r"""Updates the parameters of a single Feature. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateFeatureRequest, dict]): - The request object. Request message for - [FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeature]. - feature (google.cloud.aiplatform_v1beta1.types.Feature): - Required. The Feature's ``name`` field is used to - identify the Feature to be updated. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` - - This corresponds to the ``feature`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Field mask is used to specify the fields to be - overwritten in the Features resource by the update. The - fields specified in the update_mask are relative to the - resource, not the full request. A field will be - overwritten if it is in the mask. If the user does not - provide a mask then only the non-empty fields present in - the request will be overwritten. 
Set the update_mask to - ``*`` to override all fields. - - Updatable fields: - - - ``description`` - - ``labels`` - - ``monitoring_config.snapshot_analysis.disabled`` - - ``monitoring_config.snapshot_analysis.monitoring_interval`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Feature: - Feature Metadata information that - describes an attribute of an entity - type. For example, apple is an entity - type, and color is a feature that - describes apple. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([feature, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.UpdateFeatureRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.UpdateFeatureRequest): - request = featurestore_service.UpdateFeatureRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if feature is not None: - request.feature = feature - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.update_feature] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("feature.name", request.feature.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_feature(self, - request: Union[featurestore_service.DeleteFeatureRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a single Feature. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteFeatureRequest, dict]): - The request object. Request message for - [FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeature]. - name (str): - Required. The name of the Features to be deleted. - Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. 
For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.DeleteFeatureRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.DeleteFeatureRequest): - request = featurestore_service.DeleteFeatureRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_feature] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
- return response - - def import_feature_values(self, - request: Union[featurestore_service.ImportFeatureValuesRequest, dict] = None, - *, - entity_type: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Imports Feature values into the Featurestore from a - source storage. - The progress of the import is tracked by the returned - operation. The imported features are guaranteed to be - visible to subsequent read operations after the - operation is marked as successfully done. - If an import operation fails, the Feature values - returned from reads and exports may be inconsistent. If - consistency is required, the caller must retry the same - import request again and wait till the new operation - returned is marked as successfully done. - There are also scenarios where the caller can cause - inconsistency. - - Source data for import contains multiple distinct - Feature values for the same entity ID and timestamp. - - Source is modified during an import. This includes - adding, updating, or removing source data and/or - metadata. Examples of updating metadata include but are - not limited to changing storage location, storage class, - or retention policy. - - Online serving cluster is under-provisioned. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesRequest, dict]): - The request object. Request message for - [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. - entity_type (str): - Required. The resource name of the EntityType grouping - the Features for which values are being imported. - Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`` - - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesResponse` - Response message for - [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.ImportFeatureValuesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.ImportFeatureValuesRequest): - request = featurestore_service.ImportFeatureValuesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if entity_type is not None: - request.entity_type = entity_type - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.import_feature_values] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("entity_type", request.entity_type), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - featurestore_service.ImportFeatureValuesResponse, - metadata_type=featurestore_service.ImportFeatureValuesOperationMetadata, - ) - - # Done; return the response. - return response - - def batch_read_feature_values(self, - request: Union[featurestore_service.BatchReadFeatureValuesRequest, dict] = None, - *, - featurestore: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Batch reads Feature values from a Featurestore. - This API enables batch reading Feature values, where - each read instance in the batch may read Feature values - of entities from one or more EntityTypes. Point-in-time - correctness is guaranteed for Feature values of each - read instance as of each instance's read timestamp. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest, dict]): - The request object. Request message for - [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. - featurestore (str): - Required. The resource name of the Featurestore from - which to query Feature values. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - - This corresponds to the ``featurestore`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesResponse` - Response message for - [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([featurestore]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.BatchReadFeatureValuesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.BatchReadFeatureValuesRequest): - request = featurestore_service.BatchReadFeatureValuesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if featurestore is not None: - request.featurestore = featurestore - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.batch_read_feature_values] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("featurestore", request.featurestore), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
- response = gac_operation.from_gapic( - response, - self._transport.operations_client, - featurestore_service.BatchReadFeatureValuesResponse, - metadata_type=featurestore_service.BatchReadFeatureValuesOperationMetadata, - ) - - # Done; return the response. - return response - - def export_feature_values(self, - request: Union[featurestore_service.ExportFeatureValuesRequest, dict] = None, - *, - entity_type: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Exports Feature values from all the entities of a - target EntityType. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest, dict]): - The request object. Request message for - [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. - entity_type (str): - Required. The resource name of the EntityType from which - to export Feature values. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - This corresponds to the ``entity_type`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesResponse` - Response message for - [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([entity_type]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.ExportFeatureValuesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.ExportFeatureValuesRequest): - request = featurestore_service.ExportFeatureValuesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if entity_type is not None: - request.entity_type = entity_type - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.export_feature_values] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("entity_type", request.entity_type), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - featurestore_service.ExportFeatureValuesResponse, - metadata_type=featurestore_service.ExportFeatureValuesOperationMetadata, - ) - - # Done; return the response. 
- return response - - def search_features(self, - request: Union[featurestore_service.SearchFeaturesRequest, dict] = None, - *, - location: str = None, - query: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.SearchFeaturesPager: - r"""Searches Features matching a query in a given - project. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest, dict]): - The request object. Request message for - [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. - location (str): - Required. The resource name of the Location to search - Features. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``location`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - query (str): - Query string that is a conjunction of field-restricted - queries and/or field-restricted filters. - Field-restricted queries and filters can be combined - using ``AND`` to form a conjunction. - - A field query is in the form FIELD:QUERY. This - implicitly checks if QUERY exists as a substring within - Feature's FIELD. The QUERY and the FIELD are converted - to a sequence of words (i.e. tokens) for comparison. - This is done by: - - - Removing leading/trailing whitespace and tokenizing - the search value. Characters that are not one of - alphanumeric ``[a-zA-Z0-9]``, underscore ``_``, or - asterisk ``*`` are treated as delimiters for tokens. - ``*`` is treated as a wildcard that matches - characters within a token. - - Ignoring case. - - Prepending an asterisk to the first and appending an - asterisk to the last token in QUERY. - - A QUERY must be either a singular token or a phrase. A - phrase is one or multiple words enclosed in double - quotation marks ("). With phrases, the order of the - words is important. 
Words in the phrase must be matching - in order and consecutively. - - Supported FIELDs for field-restricted queries: - - - ``feature_id`` - - ``description`` - - ``entity_type_id`` - - Examples: - - - ``feature_id: foo`` --> Matches a Feature with ID - containing the substring ``foo`` (eg. ``foo``, - ``foofeature``, ``barfoo``). - - ``feature_id: foo*feature`` --> Matches a Feature - with ID containing the substring ``foo*feature`` (eg. - ``foobarfeature``). - - ``feature_id: foo AND description: bar`` --> Matches - a Feature with ID containing the substring ``foo`` - and description containing the substring ``bar``. - - Besides field queries, the following exact-match filters - are supported. The exact-match filters do not support - wildcards. Unlike field-restricted queries, exact-match - filters are case-sensitive. - - - ``feature_id``: Supports = comparisons. - - ``description``: Supports = comparisons. Multi-token - filters should be enclosed in quotes. - - ``entity_type_id``: Supports = comparisons. - - ``value_type``: Supports = and != comparisons. - - ``labels``: Supports key-value equality as well as - key presence. - - ``featurestore_id``: Supports = comparisons. - - Examples: - - - ``description = "foo bar"`` --> Any Feature with - description exactly equal to ``foo bar`` - - ``value_type = DOUBLE`` --> Features whose type is - DOUBLE. - - ``labels.active = yes AND labels.env = prod`` --> - Features having both (active: yes) and (env: prod) - labels. - - ``labels.env: *`` --> Any Feature which has a label - with ``env`` as the key. - - This corresponds to the ``query`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.SearchFeaturesPager: - Response message for - [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([location, query]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a featurestore_service.SearchFeaturesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, featurestore_service.SearchFeaturesRequest): - request = featurestore_service.SearchFeaturesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if location is not None: - request.location = location - if query is not None: - request.query = query - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.search_features] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("location", request.location), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. 
- response = pagers.SearchFeaturesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! - """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "FeaturestoreServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py deleted file mode 100644 index 5345c188e7..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/pagers.py +++ /dev/null @@ -1,509 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.aiplatform_v1beta1.types import entity_type -from google.cloud.aiplatform_v1beta1.types import feature -from google.cloud.aiplatform_v1beta1.types import featurestore -from google.cloud.aiplatform_v1beta1.types import featurestore_service - - -class ListFeaturestoresPager: - """A pager for iterating through ``list_featurestores`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse` object, and - provides an ``__iter__`` method to iterate through its - ``featurestores`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListFeaturestores`` requests and continue to iterate - through the ``featurestores`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., featurestore_service.ListFeaturestoresResponse], - request: featurestore_service.ListFeaturestoresRequest, - response: featurestore_service.ListFeaturestoresResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = featurestore_service.ListFeaturestoresRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[featurestore_service.ListFeaturestoresResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[featurestore.Featurestore]: - for page in self.pages: - yield from page.featurestores - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListFeaturestoresAsyncPager: - """A pager for iterating through ``list_featurestores`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``featurestores`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListFeaturestores`` requests and continue to iterate - through the ``featurestores`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[featurestore_service.ListFeaturestoresResponse]], - request: featurestore_service.ListFeaturestoresRequest, - response: featurestore_service.ListFeaturestoresResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. 
- request (google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListFeaturestoresResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = featurestore_service.ListFeaturestoresRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[featurestore_service.ListFeaturestoresResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[featurestore.Featurestore]: - async def async_generator(): - async for page in self.pages: - for response in page.featurestores: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListEntityTypesPager: - """A pager for iterating through ``list_entity_types`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``entity_types`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListEntityTypes`` requests and continue to iterate - through the ``entity_types`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., featurestore_service.ListEntityTypesResponse], - request: featurestore_service.ListEntityTypesRequest, - response: featurestore_service.ListEntityTypesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = featurestore_service.ListEntityTypesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[featurestore_service.ListEntityTypesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[entity_type.EntityType]: - for page in self.pages: - yield from page.entity_types - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListEntityTypesAsyncPager: - """A pager for iterating through ``list_entity_types`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``entity_types`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListEntityTypes`` requests and continue to iterate - through the ``entity_types`` field on the - corresponding responses. 
- - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[featurestore_service.ListEntityTypesResponse]], - request: featurestore_service.ListEntityTypesRequest, - response: featurestore_service.ListEntityTypesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListEntityTypesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = featurestore_service.ListEntityTypesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[featurestore_service.ListEntityTypesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[entity_type.EntityType]: - async def async_generator(): - async for page in self.pages: - for response in page.entity_types: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListFeaturesPager: - """A pager for iterating through ``list_features`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``features`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListFeatures`` requests and continue to iterate - through the ``features`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., featurestore_service.ListFeaturesResponse], - request: featurestore_service.ListFeaturesRequest, - response: featurestore_service.ListFeaturesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = featurestore_service.ListFeaturesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[featurestore_service.ListFeaturesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[feature.Feature]: - for page in self.pages: - yield from page.features - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListFeaturesAsyncPager: - """A pager for iterating through ``list_features`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``features`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListFeatures`` requests and continue to iterate - through the ``features`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[featurestore_service.ListFeaturesResponse]], - request: featurestore_service.ListFeaturesRequest, - response: featurestore_service.ListFeaturesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest): - The initial request object. 
- response (google.cloud.aiplatform_v1beta1.types.ListFeaturesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = featurestore_service.ListFeaturesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[featurestore_service.ListFeaturesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[feature.Feature]: - async def async_generator(): - async for page in self.pages: - for response in page.features: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class SearchFeaturesPager: - """A pager for iterating through ``search_features`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``features`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``SearchFeatures`` requests and continue to iterate - through the ``features`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., featurestore_service.SearchFeaturesResponse], - request: featurestore_service.SearchFeaturesRequest, - response: featurestore_service.SearchFeaturesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = featurestore_service.SearchFeaturesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[featurestore_service.SearchFeaturesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[feature.Feature]: - for page in self.pages: - yield from page.features - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class SearchFeaturesAsyncPager: - """A pager for iterating through ``search_features`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``features`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``SearchFeatures`` requests and continue to iterate - through the ``features`` field on the - corresponding responses. 
- - All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[featurestore_service.SearchFeaturesResponse]], - request: featurestore_service.SearchFeaturesRequest, - response: featurestore_service.SearchFeaturesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.SearchFeaturesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = featurestore_service.SearchFeaturesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[featurestore_service.SearchFeaturesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[feature.Feature]: - async def async_generator(): - async for page in self.pages: - for response in page.features: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/__init__.py 
b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/__init__.py deleted file mode 100644 index e8a1ff1b03..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import FeaturestoreServiceTransport -from .grpc import FeaturestoreServiceGrpcTransport -from .grpc_asyncio import FeaturestoreServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. 
-_transport_registry = OrderedDict() # type: Dict[str, Type[FeaturestoreServiceTransport]] -_transport_registry['grpc'] = FeaturestoreServiceGrpcTransport -_transport_registry['grpc_asyncio'] = FeaturestoreServiceGrpcAsyncIOTransport - -__all__ = ( - 'FeaturestoreServiceTransport', - 'FeaturestoreServiceGrpcTransport', - 'FeaturestoreServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py deleted file mode 100644 index 37f11f8160..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/base.py +++ /dev/null @@ -1,424 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import entity_type -from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type -from google.cloud.aiplatform_v1beta1.types import feature -from google.cloud.aiplatform_v1beta1.types import feature as gca_feature -from google.cloud.aiplatform_v1beta1.types import featurestore -from google.cloud.aiplatform_v1beta1.types import featurestore_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class FeaturestoreServiceTransport(abc.ABC): - """Abstract transport class for FeaturestoreService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. 
- credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. 
- if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.create_featurestore: gapic_v1.method.wrap_method( - self.create_featurestore, - default_timeout=5.0, - client_info=client_info, - ), - self.get_featurestore: gapic_v1.method.wrap_method( - self.get_featurestore, - default_timeout=5.0, - client_info=client_info, - ), - self.list_featurestores: gapic_v1.method.wrap_method( - self.list_featurestores, - default_timeout=5.0, - client_info=client_info, - ), - self.update_featurestore: gapic_v1.method.wrap_method( - self.update_featurestore, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_featurestore: gapic_v1.method.wrap_method( - self.delete_featurestore, - default_timeout=5.0, - client_info=client_info, - ), - self.create_entity_type: gapic_v1.method.wrap_method( - self.create_entity_type, - default_timeout=5.0, - client_info=client_info, - ), - self.get_entity_type: gapic_v1.method.wrap_method( - self.get_entity_type, - default_timeout=5.0, - client_info=client_info, - ), - self.list_entity_types: gapic_v1.method.wrap_method( - self.list_entity_types, - default_timeout=5.0, - client_info=client_info, - ), - self.update_entity_type: gapic_v1.method.wrap_method( - self.update_entity_type, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_entity_type: gapic_v1.method.wrap_method( - self.delete_entity_type, - default_timeout=5.0, - client_info=client_info, - ), - self.create_feature: gapic_v1.method.wrap_method( - self.create_feature, - default_timeout=5.0, - client_info=client_info, - ), - self.batch_create_features: gapic_v1.method.wrap_method( - self.batch_create_features, - 
default_timeout=5.0, - client_info=client_info, - ), - self.get_feature: gapic_v1.method.wrap_method( - self.get_feature, - default_timeout=5.0, - client_info=client_info, - ), - self.list_features: gapic_v1.method.wrap_method( - self.list_features, - default_timeout=5.0, - client_info=client_info, - ), - self.update_feature: gapic_v1.method.wrap_method( - self.update_feature, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_feature: gapic_v1.method.wrap_method( - self.delete_feature, - default_timeout=5.0, - client_info=client_info, - ), - self.import_feature_values: gapic_v1.method.wrap_method( - self.import_feature_values, - default_timeout=5.0, - client_info=client_info, - ), - self.batch_read_feature_values: gapic_v1.method.wrap_method( - self.batch_read_feature_values, - default_timeout=5.0, - client_info=client_info, - ), - self.export_feature_values: gapic_v1.method.wrap_method( - self.export_feature_values, - default_timeout=None, - client_info=client_info, - ), - self.search_features: gapic_v1.method.wrap_method( - self.search_features, - default_timeout=5.0, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! 
- """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_featurestore(self) -> Callable[ - [featurestore_service.CreateFeaturestoreRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_featurestore(self) -> Callable[ - [featurestore_service.GetFeaturestoreRequest], - Union[ - featurestore.Featurestore, - Awaitable[featurestore.Featurestore] - ]]: - raise NotImplementedError() - - @property - def list_featurestores(self) -> Callable[ - [featurestore_service.ListFeaturestoresRequest], - Union[ - featurestore_service.ListFeaturestoresResponse, - Awaitable[featurestore_service.ListFeaturestoresResponse] - ]]: - raise NotImplementedError() - - @property - def update_featurestore(self) -> Callable[ - [featurestore_service.UpdateFeaturestoreRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def delete_featurestore(self) -> Callable[ - [featurestore_service.DeleteFeaturestoreRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def create_entity_type(self) -> Callable[ - [featurestore_service.CreateEntityTypeRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_entity_type(self) -> Callable[ - [featurestore_service.GetEntityTypeRequest], - Union[ - entity_type.EntityType, - Awaitable[entity_type.EntityType] - ]]: - raise NotImplementedError() - - @property - def list_entity_types(self) -> Callable[ - [featurestore_service.ListEntityTypesRequest], - Union[ - featurestore_service.ListEntityTypesResponse, - Awaitable[featurestore_service.ListEntityTypesResponse] - ]]: - raise 
NotImplementedError() - - @property - def update_entity_type(self) -> Callable[ - [featurestore_service.UpdateEntityTypeRequest], - Union[ - gca_entity_type.EntityType, - Awaitable[gca_entity_type.EntityType] - ]]: - raise NotImplementedError() - - @property - def delete_entity_type(self) -> Callable[ - [featurestore_service.DeleteEntityTypeRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def create_feature(self) -> Callable[ - [featurestore_service.CreateFeatureRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def batch_create_features(self) -> Callable[ - [featurestore_service.BatchCreateFeaturesRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_feature(self) -> Callable[ - [featurestore_service.GetFeatureRequest], - Union[ - feature.Feature, - Awaitable[feature.Feature] - ]]: - raise NotImplementedError() - - @property - def list_features(self) -> Callable[ - [featurestore_service.ListFeaturesRequest], - Union[ - featurestore_service.ListFeaturesResponse, - Awaitable[featurestore_service.ListFeaturesResponse] - ]]: - raise NotImplementedError() - - @property - def update_feature(self) -> Callable[ - [featurestore_service.UpdateFeatureRequest], - Union[ - gca_feature.Feature, - Awaitable[gca_feature.Feature] - ]]: - raise NotImplementedError() - - @property - def delete_feature(self) -> Callable[ - [featurestore_service.DeleteFeatureRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def import_feature_values(self) -> Callable[ - [featurestore_service.ImportFeatureValuesRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def 
batch_read_feature_values(self) -> Callable[ - [featurestore_service.BatchReadFeatureValuesRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def export_feature_values(self) -> Callable[ - [featurestore_service.ExportFeatureValuesRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def search_features(self) -> Callable[ - [featurestore_service.SearchFeaturesRequest], - Union[ - featurestore_service.SearchFeaturesResponse, - Awaitable[featurestore_service.SearchFeaturesResponse] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'FeaturestoreServiceTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py deleted file mode 100644 index 66d4cc5c69..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc.py +++ /dev/null @@ -1,805 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1beta1.types import entity_type -from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type -from google.cloud.aiplatform_v1beta1.types import feature -from google.cloud.aiplatform_v1beta1.types import feature as gca_feature -from google.cloud.aiplatform_v1beta1.types import featurestore -from google.cloud.aiplatform_v1beta1.types import featurestore_service -from google.longrunning import operations_pb2 # type: ignore -from .base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO - - -class FeaturestoreServiceGrpcTransport(FeaturestoreServiceTransport): - """gRPC backend transport for FeaturestoreService. - - The service that handles CRUD and List for resources for - Featurestore. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. 
- ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_featurestore(self) -> Callable[ - [featurestore_service.CreateFeaturestoreRequest], - operations_pb2.Operation]: - r"""Return a callable for the create featurestore method over gRPC. - - Creates a new Featurestore in a given project and - location. 
- - Returns: - Callable[[~.CreateFeaturestoreRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_featurestore' not in self._stubs: - self._stubs['create_featurestore'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateFeaturestore', - request_serializer=featurestore_service.CreateFeaturestoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_featurestore'] - - @property - def get_featurestore(self) -> Callable[ - [featurestore_service.GetFeaturestoreRequest], - featurestore.Featurestore]: - r"""Return a callable for the get featurestore method over gRPC. - - Gets details of a single Featurestore. - - Returns: - Callable[[~.GetFeaturestoreRequest], - ~.Featurestore]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_featurestore' not in self._stubs: - self._stubs['get_featurestore'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeaturestore', - request_serializer=featurestore_service.GetFeaturestoreRequest.serialize, - response_deserializer=featurestore.Featurestore.deserialize, - ) - return self._stubs['get_featurestore'] - - @property - def list_featurestores(self) -> Callable[ - [featurestore_service.ListFeaturestoresRequest], - featurestore_service.ListFeaturestoresResponse]: - r"""Return a callable for the list featurestores method over gRPC. - - Lists Featurestores in a given project and location. 
- - Returns: - Callable[[~.ListFeaturestoresRequest], - ~.ListFeaturestoresResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_featurestores' not in self._stubs: - self._stubs['list_featurestores'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListFeaturestores', - request_serializer=featurestore_service.ListFeaturestoresRequest.serialize, - response_deserializer=featurestore_service.ListFeaturestoresResponse.deserialize, - ) - return self._stubs['list_featurestores'] - - @property - def update_featurestore(self) -> Callable[ - [featurestore_service.UpdateFeaturestoreRequest], - operations_pb2.Operation]: - r"""Return a callable for the update featurestore method over gRPC. - - Updates the parameters of a single Featurestore. - - Returns: - Callable[[~.UpdateFeaturestoreRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_featurestore' not in self._stubs: - self._stubs['update_featurestore'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateFeaturestore', - request_serializer=featurestore_service.UpdateFeaturestoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_featurestore'] - - @property - def delete_featurestore(self) -> Callable[ - [featurestore_service.DeleteFeaturestoreRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete featurestore method over gRPC. - - Deletes a single Featurestore. 
The Featurestore must not contain - any EntityTypes or ``force`` must be set to true for the request - to succeed. - - Returns: - Callable[[~.DeleteFeaturestoreRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_featurestore' not in self._stubs: - self._stubs['delete_featurestore'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteFeaturestore', - request_serializer=featurestore_service.DeleteFeaturestoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_featurestore'] - - @property - def create_entity_type(self) -> Callable[ - [featurestore_service.CreateEntityTypeRequest], - operations_pb2.Operation]: - r"""Return a callable for the create entity type method over gRPC. - - Creates a new EntityType in a given Featurestore. - - Returns: - Callable[[~.CreateEntityTypeRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_entity_type' not in self._stubs: - self._stubs['create_entity_type'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateEntityType', - request_serializer=featurestore_service.CreateEntityTypeRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_entity_type'] - - @property - def get_entity_type(self) -> Callable[ - [featurestore_service.GetEntityTypeRequest], - entity_type.EntityType]: - r"""Return a callable for the get entity type method over gRPC. - - Gets details of a single EntityType. - - Returns: - Callable[[~.GetEntityTypeRequest], - ~.EntityType]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_entity_type' not in self._stubs: - self._stubs['get_entity_type'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetEntityType', - request_serializer=featurestore_service.GetEntityTypeRequest.serialize, - response_deserializer=entity_type.EntityType.deserialize, - ) - return self._stubs['get_entity_type'] - - @property - def list_entity_types(self) -> Callable[ - [featurestore_service.ListEntityTypesRequest], - featurestore_service.ListEntityTypesResponse]: - r"""Return a callable for the list entity types method over gRPC. - - Lists EntityTypes in a given Featurestore. - - Returns: - Callable[[~.ListEntityTypesRequest], - ~.ListEntityTypesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_entity_types' not in self._stubs: - self._stubs['list_entity_types'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListEntityTypes', - request_serializer=featurestore_service.ListEntityTypesRequest.serialize, - response_deserializer=featurestore_service.ListEntityTypesResponse.deserialize, - ) - return self._stubs['list_entity_types'] - - @property - def update_entity_type(self) -> Callable[ - [featurestore_service.UpdateEntityTypeRequest], - gca_entity_type.EntityType]: - r"""Return a callable for the update entity type method over gRPC. - - Updates the parameters of a single EntityType. - - Returns: - Callable[[~.UpdateEntityTypeRequest], - ~.EntityType]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_entity_type' not in self._stubs: - self._stubs['update_entity_type'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateEntityType', - request_serializer=featurestore_service.UpdateEntityTypeRequest.serialize, - response_deserializer=gca_entity_type.EntityType.deserialize, - ) - return self._stubs['update_entity_type'] - - @property - def delete_entity_type(self) -> Callable[ - [featurestore_service.DeleteEntityTypeRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete entity type method over gRPC. - - Deletes a single EntityType. The EntityType must not have any - Features or ``force`` must be set to true for the request to - succeed. - - Returns: - Callable[[~.DeleteEntityTypeRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_entity_type' not in self._stubs: - self._stubs['delete_entity_type'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteEntityType', - request_serializer=featurestore_service.DeleteEntityTypeRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_entity_type'] - - @property - def create_feature(self) -> Callable[ - [featurestore_service.CreateFeatureRequest], - operations_pb2.Operation]: - r"""Return a callable for the create feature method over gRPC. - - Creates a new Feature in a given EntityType. - - Returns: - Callable[[~.CreateFeatureRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_feature' not in self._stubs: - self._stubs['create_feature'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateFeature', - request_serializer=featurestore_service.CreateFeatureRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_feature'] - - @property - def batch_create_features(self) -> Callable[ - [featurestore_service.BatchCreateFeaturesRequest], - operations_pb2.Operation]: - r"""Return a callable for the batch create features method over gRPC. - - Creates a batch of Features in a given EntityType. - - Returns: - Callable[[~.BatchCreateFeaturesRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_create_features' not in self._stubs: - self._stubs['batch_create_features'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/BatchCreateFeatures', - request_serializer=featurestore_service.BatchCreateFeaturesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['batch_create_features'] - - @property - def get_feature(self) -> Callable[ - [featurestore_service.GetFeatureRequest], - feature.Feature]: - r"""Return a callable for the get feature method over gRPC. - - Gets details of a single Feature. - - Returns: - Callable[[~.GetFeatureRequest], - ~.Feature]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_feature' not in self._stubs: - self._stubs['get_feature'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeature', - request_serializer=featurestore_service.GetFeatureRequest.serialize, - response_deserializer=feature.Feature.deserialize, - ) - return self._stubs['get_feature'] - - @property - def list_features(self) -> Callable[ - [featurestore_service.ListFeaturesRequest], - featurestore_service.ListFeaturesResponse]: - r"""Return a callable for the list features method over gRPC. - - Lists Features in a given EntityType. - - Returns: - Callable[[~.ListFeaturesRequest], - ~.ListFeaturesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_features' not in self._stubs: - self._stubs['list_features'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListFeatures', - request_serializer=featurestore_service.ListFeaturesRequest.serialize, - response_deserializer=featurestore_service.ListFeaturesResponse.deserialize, - ) - return self._stubs['list_features'] - - @property - def update_feature(self) -> Callable[ - [featurestore_service.UpdateFeatureRequest], - gca_feature.Feature]: - r"""Return a callable for the update feature method over gRPC. - - Updates the parameters of a single Feature. - - Returns: - Callable[[~.UpdateFeatureRequest], - ~.Feature]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_feature' not in self._stubs: - self._stubs['update_feature'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateFeature', - request_serializer=featurestore_service.UpdateFeatureRequest.serialize, - response_deserializer=gca_feature.Feature.deserialize, - ) - return self._stubs['update_feature'] - - @property - def delete_feature(self) -> Callable[ - [featurestore_service.DeleteFeatureRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete feature method over gRPC. - - Deletes a single Feature. - - Returns: - Callable[[~.DeleteFeatureRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_feature' not in self._stubs: - self._stubs['delete_feature'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteFeature', - request_serializer=featurestore_service.DeleteFeatureRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_feature'] - - @property - def import_feature_values(self) -> Callable[ - [featurestore_service.ImportFeatureValuesRequest], - operations_pb2.Operation]: - r"""Return a callable for the import feature values method over gRPC. - - Imports Feature values into the Featurestore from a - source storage. - The progress of the import is tracked by the returned - operation. The imported features are guaranteed to be - visible to subsequent read operations after the - operation is marked as successfully done. - If an import operation fails, the Feature values - returned from reads and exports may be inconsistent. If - consistency is required, the caller must retry the same - import request again and wait till the new operation - returned is marked as successfully done. - There are also scenarios where the caller can cause - inconsistency. - - Source data for import contains multiple distinct - Feature values for the same entity ID and timestamp. - - Source is modified during an import. This includes - adding, updating, or removing source data and/or - metadata. Examples of updating metadata include but are - not limited to changing storage location, storage class, - or retention policy. - - Online serving cluster is under-provisioned. - - Returns: - Callable[[~.ImportFeatureValuesRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'import_feature_values' not in self._stubs: - self._stubs['import_feature_values'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ImportFeatureValues', - request_serializer=featurestore_service.ImportFeatureValuesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['import_feature_values'] - - @property - def batch_read_feature_values(self) -> Callable[ - [featurestore_service.BatchReadFeatureValuesRequest], - operations_pb2.Operation]: - r"""Return a callable for the batch read feature values method over gRPC. - - Batch reads Feature values from a Featurestore. - This API enables batch reading Feature values, where - each read instance in the batch may read Feature values - of entities from one or more EntityTypes. Point-in-time - correctness is guaranteed for Feature values of each - read instance as of each instance's read timestamp. - - Returns: - Callable[[~.BatchReadFeatureValuesRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_read_feature_values' not in self._stubs: - self._stubs['batch_read_feature_values'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/BatchReadFeatureValues', - request_serializer=featurestore_service.BatchReadFeatureValuesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['batch_read_feature_values'] - - @property - def export_feature_values(self) -> Callable[ - [featurestore_service.ExportFeatureValuesRequest], - operations_pb2.Operation]: - r"""Return a callable for the export feature values method over gRPC. - - Exports Feature values from all the entities of a - target EntityType. 
- - Returns: - Callable[[~.ExportFeatureValuesRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'export_feature_values' not in self._stubs: - self._stubs['export_feature_values'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ExportFeatureValues', - request_serializer=featurestore_service.ExportFeatureValuesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_feature_values'] - - @property - def search_features(self) -> Callable[ - [featurestore_service.SearchFeaturesRequest], - featurestore_service.SearchFeaturesResponse]: - r"""Return a callable for the search features method over gRPC. - - Searches Features matching a query in a given - project. - - Returns: - Callable[[~.SearchFeaturesRequest], - ~.SearchFeaturesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'search_features' not in self._stubs: - self._stubs['search_features'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/SearchFeatures', - request_serializer=featurestore_service.SearchFeaturesRequest.serialize, - response_deserializer=featurestore_service.SearchFeaturesResponse.deserialize, - ) - return self._stubs['search_features'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'FeaturestoreServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py deleted file mode 100644 index c093d2122a..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,809 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import entity_type -from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type -from google.cloud.aiplatform_v1beta1.types import feature -from google.cloud.aiplatform_v1beta1.types import feature as gca_feature -from google.cloud.aiplatform_v1beta1.types import featurestore -from google.cloud.aiplatform_v1beta1.types import featurestore_service -from google.longrunning import operations_pb2 # type: ignore -from .base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import FeaturestoreServiceGrpcTransport - - -class FeaturestoreServiceGrpcAsyncIOTransport(FeaturestoreServiceTransport): - """gRPC AsyncIO backend transport for FeaturestoreService. - - The service that handles CRUD and List for resources for - Featurestore. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. 
- Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. 
- credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. 
If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. 
- if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_featurestore(self) -> Callable[ - [featurestore_service.CreateFeaturestoreRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create featurestore method over gRPC. - - Creates a new Featurestore in a given project and - location. - - Returns: - Callable[[~.CreateFeaturestoreRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_featurestore' not in self._stubs: - self._stubs['create_featurestore'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateFeaturestore', - request_serializer=featurestore_service.CreateFeaturestoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_featurestore'] - - @property - def get_featurestore(self) -> Callable[ - [featurestore_service.GetFeaturestoreRequest], - Awaitable[featurestore.Featurestore]]: - r"""Return a callable for the get featurestore method over gRPC. - - Gets details of a single Featurestore. - - Returns: - Callable[[~.GetFeaturestoreRequest], - Awaitable[~.Featurestore]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_featurestore' not in self._stubs: - self._stubs['get_featurestore'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeaturestore', - request_serializer=featurestore_service.GetFeaturestoreRequest.serialize, - response_deserializer=featurestore.Featurestore.deserialize, - ) - return self._stubs['get_featurestore'] - - @property - def list_featurestores(self) -> Callable[ - [featurestore_service.ListFeaturestoresRequest], - Awaitable[featurestore_service.ListFeaturestoresResponse]]: - r"""Return a callable for the list featurestores method over gRPC. - - Lists Featurestores in a given project and location. - - Returns: - Callable[[~.ListFeaturestoresRequest], - Awaitable[~.ListFeaturestoresResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_featurestores' not in self._stubs: - self._stubs['list_featurestores'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListFeaturestores', - request_serializer=featurestore_service.ListFeaturestoresRequest.serialize, - response_deserializer=featurestore_service.ListFeaturestoresResponse.deserialize, - ) - return self._stubs['list_featurestores'] - - @property - def update_featurestore(self) -> Callable[ - [featurestore_service.UpdateFeaturestoreRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the update featurestore method over gRPC. - - Updates the parameters of a single Featurestore. - - Returns: - Callable[[~.UpdateFeaturestoreRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_featurestore' not in self._stubs: - self._stubs['update_featurestore'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateFeaturestore', - request_serializer=featurestore_service.UpdateFeaturestoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_featurestore'] - - @property - def delete_featurestore(self) -> Callable[ - [featurestore_service.DeleteFeaturestoreRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete featurestore method over gRPC. - - Deletes a single Featurestore. The Featurestore must not contain - any EntityTypes or ``force`` must be set to true for the request - to succeed. - - Returns: - Callable[[~.DeleteFeaturestoreRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_featurestore' not in self._stubs: - self._stubs['delete_featurestore'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteFeaturestore', - request_serializer=featurestore_service.DeleteFeaturestoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_featurestore'] - - @property - def create_entity_type(self) -> Callable[ - [featurestore_service.CreateEntityTypeRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create entity type method over gRPC. - - Creates a new EntityType in a given Featurestore. 
- - Returns: - Callable[[~.CreateEntityTypeRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_entity_type' not in self._stubs: - self._stubs['create_entity_type'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateEntityType', - request_serializer=featurestore_service.CreateEntityTypeRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_entity_type'] - - @property - def get_entity_type(self) -> Callable[ - [featurestore_service.GetEntityTypeRequest], - Awaitable[entity_type.EntityType]]: - r"""Return a callable for the get entity type method over gRPC. - - Gets details of a single EntityType. - - Returns: - Callable[[~.GetEntityTypeRequest], - Awaitable[~.EntityType]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_entity_type' not in self._stubs: - self._stubs['get_entity_type'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetEntityType', - request_serializer=featurestore_service.GetEntityTypeRequest.serialize, - response_deserializer=entity_type.EntityType.deserialize, - ) - return self._stubs['get_entity_type'] - - @property - def list_entity_types(self) -> Callable[ - [featurestore_service.ListEntityTypesRequest], - Awaitable[featurestore_service.ListEntityTypesResponse]]: - r"""Return a callable for the list entity types method over gRPC. - - Lists EntityTypes in a given Featurestore. 
- - Returns: - Callable[[~.ListEntityTypesRequest], - Awaitable[~.ListEntityTypesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_entity_types' not in self._stubs: - self._stubs['list_entity_types'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListEntityTypes', - request_serializer=featurestore_service.ListEntityTypesRequest.serialize, - response_deserializer=featurestore_service.ListEntityTypesResponse.deserialize, - ) - return self._stubs['list_entity_types'] - - @property - def update_entity_type(self) -> Callable[ - [featurestore_service.UpdateEntityTypeRequest], - Awaitable[gca_entity_type.EntityType]]: - r"""Return a callable for the update entity type method over gRPC. - - Updates the parameters of a single EntityType. - - Returns: - Callable[[~.UpdateEntityTypeRequest], - Awaitable[~.EntityType]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_entity_type' not in self._stubs: - self._stubs['update_entity_type'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateEntityType', - request_serializer=featurestore_service.UpdateEntityTypeRequest.serialize, - response_deserializer=gca_entity_type.EntityType.deserialize, - ) - return self._stubs['update_entity_type'] - - @property - def delete_entity_type(self) -> Callable[ - [featurestore_service.DeleteEntityTypeRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete entity type method over gRPC. 
- - Deletes a single EntityType. The EntityType must not have any - Features or ``force`` must be set to true for the request to - succeed. - - Returns: - Callable[[~.DeleteEntityTypeRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_entity_type' not in self._stubs: - self._stubs['delete_entity_type'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteEntityType', - request_serializer=featurestore_service.DeleteEntityTypeRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_entity_type'] - - @property - def create_feature(self) -> Callable[ - [featurestore_service.CreateFeatureRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create feature method over gRPC. - - Creates a new Feature in a given EntityType. - - Returns: - Callable[[~.CreateFeatureRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_feature' not in self._stubs: - self._stubs['create_feature'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateFeature', - request_serializer=featurestore_service.CreateFeatureRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_feature'] - - @property - def batch_create_features(self) -> Callable[ - [featurestore_service.BatchCreateFeaturesRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the batch create features method over gRPC. - - Creates a batch of Features in a given EntityType. - - Returns: - Callable[[~.BatchCreateFeaturesRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_create_features' not in self._stubs: - self._stubs['batch_create_features'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/BatchCreateFeatures', - request_serializer=featurestore_service.BatchCreateFeaturesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['batch_create_features'] - - @property - def get_feature(self) -> Callable[ - [featurestore_service.GetFeatureRequest], - Awaitable[feature.Feature]]: - r"""Return a callable for the get feature method over gRPC. - - Gets details of a single Feature. - - Returns: - Callable[[~.GetFeatureRequest], - Awaitable[~.Feature]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_feature' not in self._stubs: - self._stubs['get_feature'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeature', - request_serializer=featurestore_service.GetFeatureRequest.serialize, - response_deserializer=feature.Feature.deserialize, - ) - return self._stubs['get_feature'] - - @property - def list_features(self) -> Callable[ - [featurestore_service.ListFeaturesRequest], - Awaitable[featurestore_service.ListFeaturesResponse]]: - r"""Return a callable for the list features method over gRPC. - - Lists Features in a given EntityType. - - Returns: - Callable[[~.ListFeaturesRequest], - Awaitable[~.ListFeaturesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_features' not in self._stubs: - self._stubs['list_features'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListFeatures', - request_serializer=featurestore_service.ListFeaturesRequest.serialize, - response_deserializer=featurestore_service.ListFeaturesResponse.deserialize, - ) - return self._stubs['list_features'] - - @property - def update_feature(self) -> Callable[ - [featurestore_service.UpdateFeatureRequest], - Awaitable[gca_feature.Feature]]: - r"""Return a callable for the update feature method over gRPC. - - Updates the parameters of a single Feature. - - Returns: - Callable[[~.UpdateFeatureRequest], - Awaitable[~.Feature]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_feature' not in self._stubs: - self._stubs['update_feature'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateFeature', - request_serializer=featurestore_service.UpdateFeatureRequest.serialize, - response_deserializer=gca_feature.Feature.deserialize, - ) - return self._stubs['update_feature'] - - @property - def delete_feature(self) -> Callable[ - [featurestore_service.DeleteFeatureRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete feature method over gRPC. - - Deletes a single Feature. - - Returns: - Callable[[~.DeleteFeatureRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_feature' not in self._stubs: - self._stubs['delete_feature'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteFeature', - request_serializer=featurestore_service.DeleteFeatureRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_feature'] - - @property - def import_feature_values(self) -> Callable[ - [featurestore_service.ImportFeatureValuesRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the import feature values method over gRPC. - - Imports Feature values into the Featurestore from a - source storage. - The progress of the import is tracked by the returned - operation. The imported features are guaranteed to be - visible to subsequent read operations after the - operation is marked as successfully done. - If an import operation fails, the Feature values - returned from reads and exports may be inconsistent. 
If - consistency is required, the caller must retry the same - import request again and wait till the new operation - returned is marked as successfully done. - There are also scenarios where the caller can cause - inconsistency. - - Source data for import contains multiple distinct - Feature values for the same entity ID and timestamp. - - Source is modified during an import. This includes - adding, updating, or removing source data and/or - metadata. Examples of updating metadata include but are - not limited to changing storage location, storage class, - or retention policy. - - Online serving cluster is under-provisioned. - - Returns: - Callable[[~.ImportFeatureValuesRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'import_feature_values' not in self._stubs: - self._stubs['import_feature_values'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ImportFeatureValues', - request_serializer=featurestore_service.ImportFeatureValuesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['import_feature_values'] - - @property - def batch_read_feature_values(self) -> Callable[ - [featurestore_service.BatchReadFeatureValuesRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the batch read feature values method over gRPC. - - Batch reads Feature values from a Featurestore. - This API enables batch reading Feature values, where - each read instance in the batch may read Feature values - of entities from one or more EntityTypes. Point-in-time - correctness is guaranteed for Feature values of each - read instance as of each instance's read timestamp. 
- - Returns: - Callable[[~.BatchReadFeatureValuesRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_read_feature_values' not in self._stubs: - self._stubs['batch_read_feature_values'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/BatchReadFeatureValues', - request_serializer=featurestore_service.BatchReadFeatureValuesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['batch_read_feature_values'] - - @property - def export_feature_values(self) -> Callable[ - [featurestore_service.ExportFeatureValuesRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the export feature values method over gRPC. - - Exports Feature values from all the entities of a - target EntityType. - - Returns: - Callable[[~.ExportFeatureValuesRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'export_feature_values' not in self._stubs: - self._stubs['export_feature_values'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/ExportFeatureValues', - request_serializer=featurestore_service.ExportFeatureValuesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_feature_values'] - - @property - def search_features(self) -> Callable[ - [featurestore_service.SearchFeaturesRequest], - Awaitable[featurestore_service.SearchFeaturesResponse]]: - r"""Return a callable for the search features method over gRPC. - - Searches Features matching a query in a given - project. - - Returns: - Callable[[~.SearchFeaturesRequest], - Awaitable[~.SearchFeaturesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'search_features' not in self._stubs: - self._stubs['search_features'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.FeaturestoreService/SearchFeatures', - request_serializer=featurestore_service.SearchFeaturesRequest.serialize, - response_deserializer=featurestore_service.SearchFeaturesResponse.deserialize, - ) - return self._stubs['search_features'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'FeaturestoreServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/__init__.py deleted file mode 100644 index fb5d596b18..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import IndexEndpointServiceClient -from .async_client import IndexEndpointServiceAsyncClient - -__all__ = ( - 'IndexEndpointServiceClient', - 'IndexEndpointServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py deleted file mode 100644 index ca3a0947b7..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/async_client.py +++ /dev/null @@ -1,925 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import pagers -from google.cloud.aiplatform_v1beta1.types import index_endpoint -from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint -from google.cloud.aiplatform_v1beta1.types import index_endpoint_service -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import IndexEndpointServiceGrpcAsyncIOTransport -from .client import IndexEndpointServiceClient - - -class IndexEndpointServiceAsyncClient: - """A service for managing Vertex AI's IndexEndpoints.""" - - _client: IndexEndpointServiceClient - - DEFAULT_ENDPOINT = IndexEndpointServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = IndexEndpointServiceClient.DEFAULT_MTLS_ENDPOINT - - index_path = staticmethod(IndexEndpointServiceClient.index_path) - parse_index_path = staticmethod(IndexEndpointServiceClient.parse_index_path) - 
index_endpoint_path = staticmethod(IndexEndpointServiceClient.index_endpoint_path) - parse_index_endpoint_path = staticmethod(IndexEndpointServiceClient.parse_index_endpoint_path) - common_billing_account_path = staticmethod(IndexEndpointServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(IndexEndpointServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(IndexEndpointServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(IndexEndpointServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(IndexEndpointServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(IndexEndpointServiceClient.parse_common_organization_path) - common_project_path = staticmethod(IndexEndpointServiceClient.common_project_path) - parse_common_project_path = staticmethod(IndexEndpointServiceClient.parse_common_project_path) - common_location_path = staticmethod(IndexEndpointServiceClient.common_location_path) - parse_common_location_path = staticmethod(IndexEndpointServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - IndexEndpointServiceAsyncClient: The constructed client. - """ - return IndexEndpointServiceClient.from_service_account_info.__func__(IndexEndpointServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. 
- args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - IndexEndpointServiceAsyncClient: The constructed client. - """ - return IndexEndpointServiceClient.from_service_account_file.__func__(IndexEndpointServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> IndexEndpointServiceTransport: - """Returns the transport used by the client instance. - - Returns: - IndexEndpointServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(IndexEndpointServiceClient).get_transport_class, type(IndexEndpointServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, IndexEndpointServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the index endpoint service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.IndexEndpointServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = IndexEndpointServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_index_endpoint(self, - request: Union[index_endpoint_service.CreateIndexEndpointRequest, dict] = None, - *, - parent: str = None, - index_endpoint: gca_index_endpoint.IndexEndpoint = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates an IndexEndpoint. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateIndexEndpointRequest, dict]): - The request object. Request message for - [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint]. - parent (:class:`str`): - Required. The resource name of the Location to create - the IndexEndpoint in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- index_endpoint (:class:`google.cloud.aiplatform_v1beta1.types.IndexEndpoint`): - Required. The IndexEndpoint to - create. - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.IndexEndpoint` Indexes are deployed into it. An IndexEndpoint can have multiple - DeployedIndexes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, index_endpoint]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_endpoint_service.CreateIndexEndpointRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if index_endpoint is not None: - request.index_endpoint = index_endpoint - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_index_endpoint, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_index_endpoint.IndexEndpoint, - metadata_type=index_endpoint_service.CreateIndexEndpointOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_index_endpoint(self, - request: Union[index_endpoint_service.GetIndexEndpointRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> index_endpoint.IndexEndpoint: - r"""Gets an IndexEndpoint. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetIndexEndpointRequest, dict]): - The request object. Request message for - [IndexEndpointService.GetIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.GetIndexEndpoint] - name (:class:`str`): - Required. The name of the IndexEndpoint resource. - Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.IndexEndpoint: - Indexes are deployed into it. An - IndexEndpoint can have multiple - DeployedIndexes. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_endpoint_service.GetIndexEndpointRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_index_endpoint, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_index_endpoints(self, - request: Union[index_endpoint_service.ListIndexEndpointsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListIndexEndpointsAsyncPager: - r"""Lists IndexEndpoints in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest, dict]): - The request object. Request message for - [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]. - parent (:class:`str`): - Required. The resource name of the Location from which - to list the IndexEndpoints. 
Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.index_endpoint_service.pagers.ListIndexEndpointsAsyncPager: - Response message for - [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_endpoint_service.ListIndexEndpointsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_index_endpoints, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListIndexEndpointsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_index_endpoint(self, - request: Union[index_endpoint_service.UpdateIndexEndpointRequest, dict] = None, - *, - index_endpoint: gca_index_endpoint.IndexEndpoint = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_index_endpoint.IndexEndpoint: - r"""Updates an IndexEndpoint. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateIndexEndpointRequest, dict]): - The request object. Request message for - [IndexEndpointService.UpdateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.UpdateIndexEndpoint]. - index_endpoint (:class:`google.cloud.aiplatform_v1beta1.types.IndexEndpoint`): - Required. The IndexEndpoint which - replaces the resource on the server. - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The update mask applies to the resource. See - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.types.IndexEndpoint: - Indexes are deployed into it. An - IndexEndpoint can have multiple - DeployedIndexes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([index_endpoint, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_endpoint_service.UpdateIndexEndpointRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if index_endpoint is not None: - request.index_endpoint = index_endpoint - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_index_endpoint, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index_endpoint.name", request.index_endpoint.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_index_endpoint(self, - request: Union[index_endpoint_service.DeleteIndexEndpointRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes an IndexEndpoint. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteIndexEndpointRequest, dict]): - The request object. 
Request message for - [IndexEndpointService.DeleteIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeleteIndexEndpoint]. - name (:class:`str`): - Required. The name of the IndexEndpoint resource to be - deleted. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_endpoint_service.DeleteIndexEndpointRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_index_endpoint, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def deploy_index(self, - request: Union[index_endpoint_service.DeployIndexRequest, dict] = None, - *, - index_endpoint: str = None, - deployed_index: gca_index_endpoint.DeployedIndex = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deploys an Index into this IndexEndpoint, creating a - DeployedIndex within it. - Only non-empty Indexes can be deployed. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeployIndexRequest, dict]): - The request object. Request message for - [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. - index_endpoint (:class:`str`): - Required. The name of the IndexEndpoint resource into - which to deploy an Index. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_index (:class:`google.cloud.aiplatform_v1beta1.types.DeployedIndex`): - Required. The DeployedIndex to be - created within the IndexEndpoint. 
- - This corresponds to the ``deployed_index`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.DeployIndexResponse` - Response message for - [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([index_endpoint, deployed_index]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_endpoint_service.DeployIndexRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if index_endpoint is not None: - request.index_endpoint = index_endpoint - if deployed_index is not None: - request.deployed_index = deployed_index - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.deploy_index, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index_endpoint", request.index_endpoint), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - index_endpoint_service.DeployIndexResponse, - metadata_type=index_endpoint_service.DeployIndexOperationMetadata, - ) - - # Done; return the response. - return response - - async def undeploy_index(self, - request: Union[index_endpoint_service.UndeployIndexRequest, dict] = None, - *, - index_endpoint: str = None, - deployed_index_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Undeploys an Index from an IndexEndpoint, removing a - DeployedIndex from it, and freeing all resources it's - using. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UndeployIndexRequest, dict]): - The request object. Request message for - [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. - index_endpoint (:class:`str`): - Required. The name of the IndexEndpoint resource from - which to undeploy an Index. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_index_id (:class:`str`): - Required. The ID of the DeployedIndex - to be undeployed from the IndexEndpoint. - - This corresponds to the ``deployed_index_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.UndeployIndexResponse` - Response message for - [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([index_endpoint, deployed_index_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_endpoint_service.UndeployIndexRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if index_endpoint is not None: - request.index_endpoint = index_endpoint - if deployed_index_id is not None: - request.deployed_index_id = deployed_index_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.undeploy_index, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index_endpoint", request.index_endpoint), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - index_endpoint_service.UndeployIndexResponse, - metadata_type=index_endpoint_service.UndeployIndexOperationMetadata, - ) - - # Done; return the response. 
- return response - - async def mutate_deployed_index(self, - request: Union[index_endpoint_service.MutateDeployedIndexRequest, dict] = None, - *, - index_endpoint: str = None, - deployed_index: gca_index_endpoint.DeployedIndex = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Update an existing DeployedIndex under an - IndexEndpoint. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.MutateDeployedIndexRequest, dict]): - The request object. Request message for - [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. - index_endpoint (:class:`str`): - Required. The name of the IndexEndpoint resource into - which to deploy an Index. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_index (:class:`google.cloud.aiplatform_v1beta1.types.DeployedIndex`): - Required. The DeployedIndex to be updated within the - IndexEndpoint. Currently, the updatable fields are - [DeployedIndex][automatic_resources] and - [DeployedIndex][dedicated_resources] - - This corresponds to the ``deployed_index`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. 
- - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.MutateDeployedIndexResponse` - Response message for - [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([index_endpoint, deployed_index]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_endpoint_service.MutateDeployedIndexRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if index_endpoint is not None: - request.index_endpoint = index_endpoint - if deployed_index is not None: - request.deployed_index = deployed_index - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.mutate_deployed_index, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index_endpoint", request.index_endpoint), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - index_endpoint_service.MutateDeployedIndexResponse, - metadata_type=index_endpoint_service.MutateDeployedIndexOperationMetadata, - ) - - # Done; return the response. 
- return response - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "IndexEndpointServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py deleted file mode 100644 index 80d3b4662c..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/client.py +++ /dev/null @@ -1,1132 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import pagers -from google.cloud.aiplatform_v1beta1.types import index_endpoint -from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint -from google.cloud.aiplatform_v1beta1.types import index_endpoint_service -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import IndexEndpointServiceGrpcTransport -from .transports.grpc_asyncio import IndexEndpointServiceGrpcAsyncIOTransport - - -class IndexEndpointServiceClientMeta(type): - """Metaclass for the IndexEndpointService client. - - This provides class-level methods for building and retrieving - support objects (e.g. 
transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[IndexEndpointServiceTransport]] - _transport_registry["grpc"] = IndexEndpointServiceGrpcTransport - _transport_registry["grpc_asyncio"] = IndexEndpointServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[IndexEndpointServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class IndexEndpointServiceClient(metaclass=IndexEndpointServiceClientMeta): - """A service for managing Vertex AI's IndexEndpoints.""" - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
- ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - IndexEndpointServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - IndexEndpointServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> IndexEndpointServiceTransport: - """Returns the transport used by the client instance. - - Returns: - IndexEndpointServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def index_path(project: str,location: str,index: str,) -> str: - """Returns a fully-qualified index string.""" - return "projects/{project}/locations/{location}/indexes/{index}".format(project=project, location=location, index=index, ) - - @staticmethod - def parse_index_path(path: str) -> Dict[str,str]: - """Parses a index path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/indexes/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def index_endpoint_path(project: str,location: str,index_endpoint: str,) -> str: - """Returns a fully-qualified index_endpoint string.""" - return "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format(project=project, location=location, index_endpoint=index_endpoint, ) - - @staticmethod - def parse_index_endpoint_path(path: str) -> Dict[str,str]: - """Parses a index_endpoint path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/indexEndpoints/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def 
common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, IndexEndpointServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the index endpoint service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. 
- transport (Union[str, IndexEndpointServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. 
- if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, IndexEndpointServiceTransport): - # transport is a IndexEndpointServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def create_index_endpoint(self, - request: Union[index_endpoint_service.CreateIndexEndpointRequest, dict] = None, - *, - parent: str = None, - index_endpoint: gca_index_endpoint.IndexEndpoint = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates an IndexEndpoint. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateIndexEndpointRequest, dict]): - The request object. Request message for - [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint]. - parent (str): - Required. The resource name of the Location to create - the IndexEndpoint in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - index_endpoint (google.cloud.aiplatform_v1beta1.types.IndexEndpoint): - Required. The IndexEndpoint to - create. - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
- - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.IndexEndpoint` Indexes are deployed into it. An IndexEndpoint can have multiple - DeployedIndexes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, index_endpoint]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_endpoint_service.CreateIndexEndpointRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_endpoint_service.CreateIndexEndpointRequest): - request = index_endpoint_service.CreateIndexEndpointRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if index_endpoint is not None: - request.index_endpoint = index_endpoint - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_index_endpoint] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_index_endpoint.IndexEndpoint, - metadata_type=index_endpoint_service.CreateIndexEndpointOperationMetadata, - ) - - # Done; return the response. 
- return response - - def get_index_endpoint(self, - request: Union[index_endpoint_service.GetIndexEndpointRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> index_endpoint.IndexEndpoint: - r"""Gets an IndexEndpoint. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetIndexEndpointRequest, dict]): - The request object. Request message for - [IndexEndpointService.GetIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.GetIndexEndpoint] - name (str): - Required. The name of the IndexEndpoint resource. - Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.IndexEndpoint: - Indexes are deployed into it. An - IndexEndpoint can have multiple - DeployedIndexes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_endpoint_service.GetIndexEndpointRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, index_endpoint_service.GetIndexEndpointRequest): - request = index_endpoint_service.GetIndexEndpointRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_index_endpoint] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_index_endpoints(self, - request: Union[index_endpoint_service.ListIndexEndpointsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListIndexEndpointsPager: - r"""Lists IndexEndpoints in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest, dict]): - The request object. Request message for - [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]. - parent (str): - Required. The resource name of the Location from which - to list the IndexEndpoints. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.services.index_endpoint_service.pagers.ListIndexEndpointsPager: - Response message for - [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_endpoint_service.ListIndexEndpointsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_endpoint_service.ListIndexEndpointsRequest): - request = index_endpoint_service.ListIndexEndpointsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_index_endpoints] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. 
- response = pagers.ListIndexEndpointsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_index_endpoint(self, - request: Union[index_endpoint_service.UpdateIndexEndpointRequest, dict] = None, - *, - index_endpoint: gca_index_endpoint.IndexEndpoint = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_index_endpoint.IndexEndpoint: - r"""Updates an IndexEndpoint. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateIndexEndpointRequest, dict]): - The request object. Request message for - [IndexEndpointService.UpdateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.UpdateIndexEndpoint]. - index_endpoint (google.cloud.aiplatform_v1beta1.types.IndexEndpoint): - Required. The IndexEndpoint which - replaces the resource on the server. - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. See - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.IndexEndpoint: - Indexes are deployed into it. An - IndexEndpoint can have multiple - DeployedIndexes. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([index_endpoint, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_endpoint_service.UpdateIndexEndpointRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_endpoint_service.UpdateIndexEndpointRequest): - request = index_endpoint_service.UpdateIndexEndpointRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if index_endpoint is not None: - request.index_endpoint = index_endpoint - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_index_endpoint] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index_endpoint.name", request.index_endpoint.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_index_endpoint(self, - request: Union[index_endpoint_service.DeleteIndexEndpointRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes an IndexEndpoint. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteIndexEndpointRequest, dict]): - The request object. 
Request message for - [IndexEndpointService.DeleteIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeleteIndexEndpoint]. - name (str): - Required. The name of the IndexEndpoint resource to be - deleted. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_endpoint_service.DeleteIndexEndpointRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, index_endpoint_service.DeleteIndexEndpointRequest): - request = index_endpoint_service.DeleteIndexEndpointRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_index_endpoint] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def deploy_index(self, - request: Union[index_endpoint_service.DeployIndexRequest, dict] = None, - *, - index_endpoint: str = None, - deployed_index: gca_index_endpoint.DeployedIndex = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deploys an Index into this IndexEndpoint, creating a - DeployedIndex within it. - Only non-empty Indexes can be deployed. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeployIndexRequest, dict]): - The request object. Request message for - [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. - index_endpoint (str): - Required. The name of the IndexEndpoint resource into - which to deploy an Index. 
Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex): - Required. The DeployedIndex to be - created within the IndexEndpoint. - - This corresponds to the ``deployed_index`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.DeployIndexResponse` - Response message for - [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([index_endpoint, deployed_index]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_endpoint_service.DeployIndexRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_endpoint_service.DeployIndexRequest): - request = index_endpoint_service.DeployIndexRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if index_endpoint is not None: - request.index_endpoint = index_endpoint - if deployed_index is not None: - request.deployed_index = deployed_index - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.deploy_index] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index_endpoint", request.index_endpoint), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - index_endpoint_service.DeployIndexResponse, - metadata_type=index_endpoint_service.DeployIndexOperationMetadata, - ) - - # Done; return the response. - return response - - def undeploy_index(self, - request: Union[index_endpoint_service.UndeployIndexRequest, dict] = None, - *, - index_endpoint: str = None, - deployed_index_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Undeploys an Index from an IndexEndpoint, removing a - DeployedIndex from it, and freeing all resources it's - using. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UndeployIndexRequest, dict]): - The request object. Request message for - [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. - index_endpoint (str): - Required. The name of the IndexEndpoint resource from - which to undeploy an Index. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- deployed_index_id (str): - Required. The ID of the DeployedIndex - to be undeployed from the IndexEndpoint. - - This corresponds to the ``deployed_index_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.UndeployIndexResponse` - Response message for - [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([index_endpoint, deployed_index_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_endpoint_service.UndeployIndexRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_endpoint_service.UndeployIndexRequest): - request = index_endpoint_service.UndeployIndexRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if index_endpoint is not None: - request.index_endpoint = index_endpoint - if deployed_index_id is not None: - request.deployed_index_id = deployed_index_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.undeploy_index] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index_endpoint", request.index_endpoint), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - index_endpoint_service.UndeployIndexResponse, - metadata_type=index_endpoint_service.UndeployIndexOperationMetadata, - ) - - # Done; return the response. - return response - - def mutate_deployed_index(self, - request: Union[index_endpoint_service.MutateDeployedIndexRequest, dict] = None, - *, - index_endpoint: str = None, - deployed_index: gca_index_endpoint.DeployedIndex = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Update an existing DeployedIndex under an - IndexEndpoint. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.MutateDeployedIndexRequest, dict]): - The request object. Request message for - [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. - index_endpoint (str): - Required. The name of the IndexEndpoint resource into - which to deploy an Index. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - - This corresponds to the ``index_endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex): - Required. The DeployedIndex to be updated within the - IndexEndpoint. 
Currently, the updatable fields are - [DeployedIndex][automatic_resources] and - [DeployedIndex][dedicated_resources] - - This corresponds to the ``deployed_index`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.MutateDeployedIndexResponse` - Response message for - [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([index_endpoint, deployed_index]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_endpoint_service.MutateDeployedIndexRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_endpoint_service.MutateDeployedIndexRequest): - request = index_endpoint_service.MutateDeployedIndexRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if index_endpoint is not None: - request.index_endpoint = index_endpoint - if deployed_index is not None: - request.deployed_index = deployed_index - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.mutate_deployed_index] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index_endpoint", request.index_endpoint), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - index_endpoint_service.MutateDeployedIndexResponse, - metadata_type=index_endpoint_service.MutateDeployedIndexOperationMetadata, - ) - - # Done; return the response. - return response - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! 
- """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "IndexEndpointServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py deleted file mode 100644 index b237d749c9..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/pagers.py +++ /dev/null @@ -1,141 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.aiplatform_v1beta1.types import index_endpoint -from google.cloud.aiplatform_v1beta1.types import index_endpoint_service - - -class ListIndexEndpointsPager: - """A pager for iterating through ``list_index_endpoints`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``index_endpoints`` field. 
- - If there are more pages, the ``__iter__`` method will make additional - ``ListIndexEndpoints`` requests and continue to iterate - through the ``index_endpoints`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., index_endpoint_service.ListIndexEndpointsResponse], - request: index_endpoint_service.ListIndexEndpointsRequest, - response: index_endpoint_service.ListIndexEndpointsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = index_endpoint_service.ListIndexEndpointsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[index_endpoint_service.ListIndexEndpointsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[index_endpoint.IndexEndpoint]: - for page in self.pages: - yield from page.index_endpoints - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListIndexEndpointsAsyncPager: - """A pager for iterating through ``list_index_endpoints`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``index_endpoints`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListIndexEndpoints`` requests and continue to iterate - through the ``index_endpoints`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[index_endpoint_service.ListIndexEndpointsResponse]], - request: index_endpoint_service.ListIndexEndpointsRequest, - response: index_endpoint_service.ListIndexEndpointsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. 
- request (google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = index_endpoint_service.ListIndexEndpointsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[index_endpoint_service.ListIndexEndpointsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[index_endpoint.IndexEndpoint]: - async def async_generator(): - async for page in self.pages: - for response in page.index_endpoints: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/__init__.py deleted file mode 100644 index 42d3519efd..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import IndexEndpointServiceTransport -from .grpc import IndexEndpointServiceGrpcTransport -from .grpc_asyncio import IndexEndpointServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[IndexEndpointServiceTransport]] -_transport_registry['grpc'] = IndexEndpointServiceGrpcTransport -_transport_registry['grpc_asyncio'] = IndexEndpointServiceGrpcAsyncIOTransport - -__all__ = ( - 'IndexEndpointServiceTransport', - 'IndexEndpointServiceGrpcTransport', - 'IndexEndpointServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py deleted file mode 100644 index d967c9f848..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/base.py +++ /dev/null @@ -1,253 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import index_endpoint -from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint -from google.cloud.aiplatform_v1beta1.types import index_endpoint_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class IndexEndpointServiceTransport(abc.ABC): - """Abstract transport class for IndexEndpointService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - 
always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. 
- if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.create_index_endpoint: gapic_v1.method.wrap_method( - self.create_index_endpoint, - default_timeout=5.0, - client_info=client_info, - ), - self.get_index_endpoint: gapic_v1.method.wrap_method( - self.get_index_endpoint, - default_timeout=5.0, - client_info=client_info, - ), - self.list_index_endpoints: gapic_v1.method.wrap_method( - self.list_index_endpoints, - default_timeout=5.0, - client_info=client_info, - ), - self.update_index_endpoint: gapic_v1.method.wrap_method( - self.update_index_endpoint, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_index_endpoint: gapic_v1.method.wrap_method( - self.delete_index_endpoint, - default_timeout=5.0, - client_info=client_info, - ), - self.deploy_index: gapic_v1.method.wrap_method( - self.deploy_index, - default_timeout=5.0, - client_info=client_info, - ), - self.undeploy_index: gapic_v1.method.wrap_method( - self.undeploy_index, - default_timeout=5.0, - client_info=client_info, - ), - self.mutate_deployed_index: gapic_v1.method.wrap_method( - self.mutate_deployed_index, - 
default_timeout=None, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! - """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_index_endpoint(self) -> Callable[ - [index_endpoint_service.CreateIndexEndpointRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_index_endpoint(self) -> Callable[ - [index_endpoint_service.GetIndexEndpointRequest], - Union[ - index_endpoint.IndexEndpoint, - Awaitable[index_endpoint.IndexEndpoint] - ]]: - raise NotImplementedError() - - @property - def list_index_endpoints(self) -> Callable[ - [index_endpoint_service.ListIndexEndpointsRequest], - Union[ - index_endpoint_service.ListIndexEndpointsResponse, - Awaitable[index_endpoint_service.ListIndexEndpointsResponse] - ]]: - raise NotImplementedError() - - @property - def update_index_endpoint(self) -> Callable[ - [index_endpoint_service.UpdateIndexEndpointRequest], - Union[ - gca_index_endpoint.IndexEndpoint, - Awaitable[gca_index_endpoint.IndexEndpoint] - ]]: - raise NotImplementedError() - - @property - def delete_index_endpoint(self) -> Callable[ - [index_endpoint_service.DeleteIndexEndpointRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def deploy_index(self) -> Callable[ - [index_endpoint_service.DeployIndexRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def undeploy_index(self) -> Callable[ - [index_endpoint_service.UndeployIndexRequest], - Union[ - operations_pb2.Operation, 
- Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def mutate_deployed_index(self) -> Callable[ - [index_endpoint_service.MutateDeployedIndexRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'IndexEndpointServiceTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py deleted file mode 100644 index 5c4460ceb1..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc.py +++ /dev/null @@ -1,462 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1beta1.types import index_endpoint -from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint -from google.cloud.aiplatform_v1beta1.types import index_endpoint_service -from google.longrunning import operations_pb2 # type: ignore -from .base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO - - -class IndexEndpointServiceGrpcTransport(IndexEndpointServiceTransport): - """gRPC backend transport for IndexEndpointService. - - A service for managing Vertex AI's IndexEndpoints. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. 
- - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. 
- Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_index_endpoint(self) -> Callable[ - [index_endpoint_service.CreateIndexEndpointRequest], - operations_pb2.Operation]: - r"""Return a callable for the create index endpoint method over gRPC. - - Creates an IndexEndpoint. 
- - Returns: - Callable[[~.CreateIndexEndpointRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_index_endpoint' not in self._stubs: - self._stubs['create_index_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/CreateIndexEndpoint', - request_serializer=index_endpoint_service.CreateIndexEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_index_endpoint'] - - @property - def get_index_endpoint(self) -> Callable[ - [index_endpoint_service.GetIndexEndpointRequest], - index_endpoint.IndexEndpoint]: - r"""Return a callable for the get index endpoint method over gRPC. - - Gets an IndexEndpoint. - - Returns: - Callable[[~.GetIndexEndpointRequest], - ~.IndexEndpoint]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_index_endpoint' not in self._stubs: - self._stubs['get_index_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/GetIndexEndpoint', - request_serializer=index_endpoint_service.GetIndexEndpointRequest.serialize, - response_deserializer=index_endpoint.IndexEndpoint.deserialize, - ) - return self._stubs['get_index_endpoint'] - - @property - def list_index_endpoints(self) -> Callable[ - [index_endpoint_service.ListIndexEndpointsRequest], - index_endpoint_service.ListIndexEndpointsResponse]: - r"""Return a callable for the list index endpoints method over gRPC. - - Lists IndexEndpoints in a Location. 
- - Returns: - Callable[[~.ListIndexEndpointsRequest], - ~.ListIndexEndpointsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_index_endpoints' not in self._stubs: - self._stubs['list_index_endpoints'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/ListIndexEndpoints', - request_serializer=index_endpoint_service.ListIndexEndpointsRequest.serialize, - response_deserializer=index_endpoint_service.ListIndexEndpointsResponse.deserialize, - ) - return self._stubs['list_index_endpoints'] - - @property - def update_index_endpoint(self) -> Callable[ - [index_endpoint_service.UpdateIndexEndpointRequest], - gca_index_endpoint.IndexEndpoint]: - r"""Return a callable for the update index endpoint method over gRPC. - - Updates an IndexEndpoint. - - Returns: - Callable[[~.UpdateIndexEndpointRequest], - ~.IndexEndpoint]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_index_endpoint' not in self._stubs: - self._stubs['update_index_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/UpdateIndexEndpoint', - request_serializer=index_endpoint_service.UpdateIndexEndpointRequest.serialize, - response_deserializer=gca_index_endpoint.IndexEndpoint.deserialize, - ) - return self._stubs['update_index_endpoint'] - - @property - def delete_index_endpoint(self) -> Callable[ - [index_endpoint_service.DeleteIndexEndpointRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete index endpoint method over gRPC. - - Deletes an IndexEndpoint. - - Returns: - Callable[[~.DeleteIndexEndpointRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_index_endpoint' not in self._stubs: - self._stubs['delete_index_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/DeleteIndexEndpoint', - request_serializer=index_endpoint_service.DeleteIndexEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_index_endpoint'] - - @property - def deploy_index(self) -> Callable[ - [index_endpoint_service.DeployIndexRequest], - operations_pb2.Operation]: - r"""Return a callable for the deploy index method over gRPC. - - Deploys an Index into this IndexEndpoint, creating a - DeployedIndex within it. - Only non-empty Indexes can be deployed. - - Returns: - Callable[[~.DeployIndexRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'deploy_index' not in self._stubs: - self._stubs['deploy_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/DeployIndex', - request_serializer=index_endpoint_service.DeployIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['deploy_index'] - - @property - def undeploy_index(self) -> Callable[ - [index_endpoint_service.UndeployIndexRequest], - operations_pb2.Operation]: - r"""Return a callable for the undeploy index method over gRPC. - - Undeploys an Index from an IndexEndpoint, removing a - DeployedIndex from it, and freeing all resources it's - using. - - Returns: - Callable[[~.UndeployIndexRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'undeploy_index' not in self._stubs: - self._stubs['undeploy_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/UndeployIndex', - request_serializer=index_endpoint_service.UndeployIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['undeploy_index'] - - @property - def mutate_deployed_index(self) -> Callable[ - [index_endpoint_service.MutateDeployedIndexRequest], - operations_pb2.Operation]: - r"""Return a callable for the mutate deployed index method over gRPC. - - Update an existing DeployedIndex under an - IndexEndpoint. - - Returns: - Callable[[~.MutateDeployedIndexRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'mutate_deployed_index' not in self._stubs: - self._stubs['mutate_deployed_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/MutateDeployedIndex', - request_serializer=index_endpoint_service.MutateDeployedIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['mutate_deployed_index'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'IndexEndpointServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py deleted file mode 100644 index 89afca14f3..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,466 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import index_endpoint -from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint -from google.cloud.aiplatform_v1beta1.types import index_endpoint_service -from google.longrunning import operations_pb2 # type: ignore -from .base import IndexEndpointServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import IndexEndpointServiceGrpcTransport - - -class IndexEndpointServiceGrpcAsyncIOTransport(IndexEndpointServiceTransport): - """gRPC AsyncIO backend transport for IndexEndpointService. - - A service for managing Vertex AI's IndexEndpoints. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. 
If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. 
- This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. 
- - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. 
- if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_index_endpoint(self) -> Callable[ - [index_endpoint_service.CreateIndexEndpointRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create index endpoint method over gRPC. - - Creates an IndexEndpoint. - - Returns: - Callable[[~.CreateIndexEndpointRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_index_endpoint' not in self._stubs: - self._stubs['create_index_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/CreateIndexEndpoint', - request_serializer=index_endpoint_service.CreateIndexEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_index_endpoint'] - - @property - def get_index_endpoint(self) -> Callable[ - [index_endpoint_service.GetIndexEndpointRequest], - Awaitable[index_endpoint.IndexEndpoint]]: - r"""Return a callable for the get index endpoint method over gRPC. - - Gets an IndexEndpoint. - - Returns: - Callable[[~.GetIndexEndpointRequest], - Awaitable[~.IndexEndpoint]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_index_endpoint' not in self._stubs: - self._stubs['get_index_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/GetIndexEndpoint', - request_serializer=index_endpoint_service.GetIndexEndpointRequest.serialize, - response_deserializer=index_endpoint.IndexEndpoint.deserialize, - ) - return self._stubs['get_index_endpoint'] - - @property - def list_index_endpoints(self) -> Callable[ - [index_endpoint_service.ListIndexEndpointsRequest], - Awaitable[index_endpoint_service.ListIndexEndpointsResponse]]: - r"""Return a callable for the list index endpoints method over gRPC. - - Lists IndexEndpoints in a Location. - - Returns: - Callable[[~.ListIndexEndpointsRequest], - Awaitable[~.ListIndexEndpointsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_index_endpoints' not in self._stubs: - self._stubs['list_index_endpoints'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/ListIndexEndpoints', - request_serializer=index_endpoint_service.ListIndexEndpointsRequest.serialize, - response_deserializer=index_endpoint_service.ListIndexEndpointsResponse.deserialize, - ) - return self._stubs['list_index_endpoints'] - - @property - def update_index_endpoint(self) -> Callable[ - [index_endpoint_service.UpdateIndexEndpointRequest], - Awaitable[gca_index_endpoint.IndexEndpoint]]: - r"""Return a callable for the update index endpoint method over gRPC. - - Updates an IndexEndpoint. - - Returns: - Callable[[~.UpdateIndexEndpointRequest], - Awaitable[~.IndexEndpoint]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_index_endpoint' not in self._stubs: - self._stubs['update_index_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/UpdateIndexEndpoint', - request_serializer=index_endpoint_service.UpdateIndexEndpointRequest.serialize, - response_deserializer=gca_index_endpoint.IndexEndpoint.deserialize, - ) - return self._stubs['update_index_endpoint'] - - @property - def delete_index_endpoint(self) -> Callable[ - [index_endpoint_service.DeleteIndexEndpointRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete index endpoint method over gRPC. - - Deletes an IndexEndpoint. - - Returns: - Callable[[~.DeleteIndexEndpointRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_index_endpoint' not in self._stubs: - self._stubs['delete_index_endpoint'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/DeleteIndexEndpoint', - request_serializer=index_endpoint_service.DeleteIndexEndpointRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_index_endpoint'] - - @property - def deploy_index(self) -> Callable[ - [index_endpoint_service.DeployIndexRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the deploy index method over gRPC. - - Deploys an Index into this IndexEndpoint, creating a - DeployedIndex within it. - Only non-empty Indexes can be deployed. - - Returns: - Callable[[~.DeployIndexRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'deploy_index' not in self._stubs: - self._stubs['deploy_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/DeployIndex', - request_serializer=index_endpoint_service.DeployIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['deploy_index'] - - @property - def undeploy_index(self) -> Callable[ - [index_endpoint_service.UndeployIndexRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the undeploy index method over gRPC. - - Undeploys an Index from an IndexEndpoint, removing a - DeployedIndex from it, and freeing all resources it's - using. - - Returns: - Callable[[~.UndeployIndexRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'undeploy_index' not in self._stubs: - self._stubs['undeploy_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/UndeployIndex', - request_serializer=index_endpoint_service.UndeployIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['undeploy_index'] - - @property - def mutate_deployed_index(self) -> Callable[ - [index_endpoint_service.MutateDeployedIndexRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the mutate deployed index method over gRPC. - - Update an existing DeployedIndex under an - IndexEndpoint. 
- - Returns: - Callable[[~.MutateDeployedIndexRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'mutate_deployed_index' not in self._stubs: - self._stubs['mutate_deployed_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexEndpointService/MutateDeployedIndex', - request_serializer=index_endpoint_service.MutateDeployedIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['mutate_deployed_index'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'IndexEndpointServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/__init__.py deleted file mode 100644 index d2a09db9f1..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import IndexServiceClient -from .async_client import IndexServiceAsyncClient - -__all__ = ( - 'IndexServiceClient', - 'IndexServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py deleted file mode 100644 index 4cff9dc98d..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/async_client.py +++ /dev/null @@ -1,640 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.index_service import pagers -from google.cloud.aiplatform_v1beta1.types import deployed_index_ref -from google.cloud.aiplatform_v1beta1.types import index -from google.cloud.aiplatform_v1beta1.types import index as gca_index -from google.cloud.aiplatform_v1beta1.types import index_service -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import IndexServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import IndexServiceGrpcAsyncIOTransport -from .client import IndexServiceClient - - -class IndexServiceAsyncClient: - """A service for creating and managing Vertex AI's Index - resources. 
- """ - - _client: IndexServiceClient - - DEFAULT_ENDPOINT = IndexServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = IndexServiceClient.DEFAULT_MTLS_ENDPOINT - - index_path = staticmethod(IndexServiceClient.index_path) - parse_index_path = staticmethod(IndexServiceClient.parse_index_path) - index_endpoint_path = staticmethod(IndexServiceClient.index_endpoint_path) - parse_index_endpoint_path = staticmethod(IndexServiceClient.parse_index_endpoint_path) - common_billing_account_path = staticmethod(IndexServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(IndexServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(IndexServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(IndexServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(IndexServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(IndexServiceClient.parse_common_organization_path) - common_project_path = staticmethod(IndexServiceClient.common_project_path) - parse_common_project_path = staticmethod(IndexServiceClient.parse_common_project_path) - common_location_path = staticmethod(IndexServiceClient.common_location_path) - parse_common_location_path = staticmethod(IndexServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - IndexServiceAsyncClient: The constructed client. 
- """ - return IndexServiceClient.from_service_account_info.__func__(IndexServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - IndexServiceAsyncClient: The constructed client. - """ - return IndexServiceClient.from_service_account_file.__func__(IndexServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> IndexServiceTransport: - """Returns the transport used by the client instance. - - Returns: - IndexServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(IndexServiceClient).get_transport_class, type(IndexServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, IndexServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the index service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.IndexServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. 
- (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = IndexServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_index(self, - request: Union[index_service.CreateIndexRequest, dict] = None, - *, - parent: str = None, - index: gca_index.Index = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates an Index. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateIndexRequest, dict]): - The request object. Request message for - [IndexService.CreateIndex][google.cloud.aiplatform.v1beta1.IndexService.CreateIndex]. - parent (:class:`str`): - Required. The resource name of the Location to create - the Index in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- index (:class:`google.cloud.aiplatform_v1beta1.types.Index`): - Required. The Index to create. - This corresponds to the ``index`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Index` A representation of a collection of database items organized in a way that - allows for approximate nearest neighbor (a.k.a ANN) - algorithms search. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, index]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_service.CreateIndexRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if index is not None: - request.index = index - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_index, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_index.Index, - metadata_type=index_service.CreateIndexOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_index(self, - request: Union[index_service.GetIndexRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> index.Index: - r"""Gets an Index. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetIndexRequest, dict]): - The request object. Request message for - [IndexService.GetIndex][google.cloud.aiplatform.v1beta1.IndexService.GetIndex] - name (:class:`str`): - Required. The name of the Index resource. Format: - ``projects/{project}/locations/{location}/indexes/{index}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Index: - A representation of a collection of - database items organized in a way that - allows for approximate nearest neighbor - (a.k.a ANN) algorithms search. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_service.GetIndexRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_index, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_indexes(self, - request: Union[index_service.ListIndexesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListIndexesAsyncPager: - r"""Lists Indexes in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListIndexesRequest, dict]): - The request object. Request message for - [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. - parent (:class:`str`): - Required. The resource name of the Location from which - to list the Indexes. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.index_service.pagers.ListIndexesAsyncPager: - Response message for - [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_service.ListIndexesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_indexes, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListIndexesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def update_index(self, - request: Union[index_service.UpdateIndexRequest, dict] = None, - *, - index: gca_index.Index = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Updates an Index. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateIndexRequest, dict]): - The request object. Request message for - [IndexService.UpdateIndex][google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex]. - index (:class:`google.cloud.aiplatform_v1beta1.types.Index`): - Required. The Index which updates the - resource on the server. - - This corresponds to the ``index`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - The update mask applies to the resource. For the - ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Index` A representation of a collection of database items organized in a way that - allows for approximate nearest neighbor (a.k.a ANN) - algorithms search. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([index, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_service.UpdateIndexRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if index is not None: - request.index = index - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_index, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index.name", request.index.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_index.Index, - metadata_type=index_service.UpdateIndexOperationMetadata, - ) - - # Done; return the response. - return response - - async def delete_index(self, - request: Union[index_service.DeleteIndexRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes an Index. An Index can only be deleted when all its - [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes] - had been undeployed. 
- - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteIndexRequest, dict]): - The request object. Request message for - [IndexService.DeleteIndex][google.cloud.aiplatform.v1beta1.IndexService.DeleteIndex]. - name (:class:`str`): - Required. The name of the Index resource to be deleted. - Format: - ``projects/{project}/locations/{location}/indexes/{index}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = index_service.DeleteIndexRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_index, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "IndexServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/client.py deleted file mode 100644 index 256526b509..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/client.py +++ /dev/null @@ -1,847 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.index_service import pagers -from google.cloud.aiplatform_v1beta1.types import deployed_index_ref -from google.cloud.aiplatform_v1beta1.types import index -from google.cloud.aiplatform_v1beta1.types import index as gca_index -from google.cloud.aiplatform_v1beta1.types import index_service -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: 
ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import IndexServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import IndexServiceGrpcTransport -from .transports.grpc_asyncio import IndexServiceGrpcAsyncIOTransport - - -class IndexServiceClientMeta(type): - """Metaclass for the IndexService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[IndexServiceTransport]] - _transport_registry["grpc"] = IndexServiceGrpcTransport - _transport_registry["grpc_asyncio"] = IndexServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[IndexServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class IndexServiceClient(metaclass=IndexServiceClientMeta): - """A service for creating and managing Vertex AI's Index - resources. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
- ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - IndexServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - IndexServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> IndexServiceTransport: - """Returns the transport used by the client instance. - - Returns: - IndexServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def index_path(project: str,location: str,index: str,) -> str: - """Returns a fully-qualified index string.""" - return "projects/{project}/locations/{location}/indexes/{index}".format(project=project, location=location, index=index, ) - - @staticmethod - def parse_index_path(path: str) -> Dict[str,str]: - """Parses a index path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/indexes/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def index_endpoint_path(project: str,location: str,index_endpoint: str,) -> str: - """Returns a fully-qualified index_endpoint string.""" - return "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format(project=project, location=location, index_endpoint=index_endpoint, ) - - @staticmethod - def parse_index_endpoint_path(path: str) -> Dict[str,str]: - """Parses a index_endpoint path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/indexEndpoints/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def 
common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, IndexServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the index service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, IndexServiceTransport]): The - transport to use. 
If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. 
- if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, IndexServiceTransport): - # transport is a IndexServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def create_index(self, - request: Union[index_service.CreateIndexRequest, dict] = None, - *, - parent: str = None, - index: gca_index.Index = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates an Index. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateIndexRequest, dict]): - The request object. Request message for - [IndexService.CreateIndex][google.cloud.aiplatform.v1beta1.IndexService.CreateIndex]. - parent (str): - Required. The resource name of the Location to create - the Index in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - index (google.cloud.aiplatform_v1beta1.types.Index): - Required. The Index to create. - This corresponds to the ``index`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
- - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Index` A representation of a collection of database items organized in a way that - allows for approximate nearest neighbor (a.k.a ANN) - algorithms search. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, index]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_service.CreateIndexRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_service.CreateIndexRequest): - request = index_service.CreateIndexRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if index is not None: - request.index = index - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_index] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_index.Index, - metadata_type=index_service.CreateIndexOperationMetadata, - ) - - # Done; return the response. 
- return response - - def get_index(self, - request: Union[index_service.GetIndexRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> index.Index: - r"""Gets an Index. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetIndexRequest, dict]): - The request object. Request message for - [IndexService.GetIndex][google.cloud.aiplatform.v1beta1.IndexService.GetIndex] - name (str): - Required. The name of the Index resource. Format: - ``projects/{project}/locations/{location}/indexes/{index}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Index: - A representation of a collection of - database items organized in a way that - allows for approximate nearest neighbor - (a.k.a ANN) algorithms search. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_service.GetIndexRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, index_service.GetIndexRequest): - request = index_service.GetIndexRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_index] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_indexes(self, - request: Union[index_service.ListIndexesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListIndexesPager: - r"""Lists Indexes in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListIndexesRequest, dict]): - The request object. Request message for - [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. - parent (str): - Required. The resource name of the Location from which - to list the Indexes. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.services.index_service.pagers.ListIndexesPager: - Response message for - [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_service.ListIndexesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_service.ListIndexesRequest): - request = index_service.ListIndexesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_indexes] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListIndexesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def update_index(self, - request: Union[index_service.UpdateIndexRequest, dict] = None, - *, - index: gca_index.Index = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Updates an Index. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateIndexRequest, dict]): - The request object. Request message for - [IndexService.UpdateIndex][google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex]. - index (google.cloud.aiplatform_v1beta1.types.Index): - Required. The Index which updates the - resource on the server. - - This corresponds to the ``index`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - The update mask applies to the resource. For the - ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Index` A representation of a collection of database items organized in a way that - allows for approximate nearest neighbor (a.k.a ANN) - algorithms search. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([index, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_service.UpdateIndexRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, index_service.UpdateIndexRequest): - request = index_service.UpdateIndexRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if index is not None: - request.index = index - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_index] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("index.name", request.index.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_index.Index, - metadata_type=index_service.UpdateIndexOperationMetadata, - ) - - # Done; return the response. - return response - - def delete_index(self, - request: Union[index_service.DeleteIndexRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes an Index. An Index can only be deleted when all its - [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes] - had been undeployed. 
- - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteIndexRequest, dict]): - The request object. Request message for - [IndexService.DeleteIndex][google.cloud.aiplatform.v1beta1.IndexService.DeleteIndex]. - name (str): - Required. The name of the Index resource to be deleted. - Format: - ``projects/{project}/locations/{location}/indexes/{index}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a index_service.DeleteIndexRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, index_service.DeleteIndexRequest): - request = index_service.DeleteIndexRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_index] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! 
- """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "IndexServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/pagers.py deleted file mode 100644 index 68972898b1..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/pagers.py +++ /dev/null @@ -1,141 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.aiplatform_v1beta1.types import index -from google.cloud.aiplatform_v1beta1.types import index_service - - -class ListIndexesPager: - """A pager for iterating through ``list_indexes`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListIndexesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``indexes`` field. 
- - If there are more pages, the ``__iter__`` method will make additional - ``ListIndexes`` requests and continue to iterate - through the ``indexes`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListIndexesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., index_service.ListIndexesResponse], - request: index_service.ListIndexesRequest, - response: index_service.ListIndexesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListIndexesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListIndexesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = index_service.ListIndexesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[index_service.ListIndexesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[index.Index]: - for page in self.pages: - yield from page.indexes - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListIndexesAsyncPager: - """A pager for iterating through ``list_indexes`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListIndexesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``indexes`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListIndexes`` requests and continue to iterate - through the ``indexes`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListIndexesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[index_service.ListIndexesResponse]], - request: index_service.ListIndexesRequest, - response: index_service.ListIndexesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListIndexesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListIndexesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = index_service.ListIndexesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[index_service.ListIndexesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[index.Index]: - async def async_generator(): - async for page in self.pages: - for response in page.indexes: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/__init__.py deleted file mode 100644 index 2f263f2fb8..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -from typing import Dict, Type - -from .base import IndexServiceTransport -from .grpc import IndexServiceGrpcTransport -from .grpc_asyncio import IndexServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[IndexServiceTransport]] -_transport_registry['grpc'] = IndexServiceGrpcTransport -_transport_registry['grpc_asyncio'] = IndexServiceGrpcAsyncIOTransport - -__all__ = ( - 'IndexServiceTransport', - 'IndexServiceGrpcTransport', - 'IndexServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py deleted file mode 100644 index 0665317324..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/base.py +++ /dev/null @@ -1,210 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import index -from google.cloud.aiplatform_v1beta1.types import index_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class IndexServiceTransport(abc.ABC): - """Abstract transport class for IndexService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
- self._wrapped_methods = { - self.create_index: gapic_v1.method.wrap_method( - self.create_index, - default_timeout=5.0, - client_info=client_info, - ), - self.get_index: gapic_v1.method.wrap_method( - self.get_index, - default_timeout=5.0, - client_info=client_info, - ), - self.list_indexes: gapic_v1.method.wrap_method( - self.list_indexes, - default_timeout=5.0, - client_info=client_info, - ), - self.update_index: gapic_v1.method.wrap_method( - self.update_index, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_index: gapic_v1.method.wrap_method( - self.delete_index, - default_timeout=5.0, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! - """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_index(self) -> Callable[ - [index_service.CreateIndexRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_index(self) -> Callable[ - [index_service.GetIndexRequest], - Union[ - index.Index, - Awaitable[index.Index] - ]]: - raise NotImplementedError() - - @property - def list_indexes(self) -> Callable[ - [index_service.ListIndexesRequest], - Union[ - index_service.ListIndexesResponse, - Awaitable[index_service.ListIndexesResponse] - ]]: - raise NotImplementedError() - - @property - def update_index(self) -> Callable[ - [index_service.UpdateIndexRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def delete_index(self) -> Callable[ - [index_service.DeleteIndexRequest], - Union[ - operations_pb2.Operation, - 
Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'IndexServiceTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py deleted file mode 100644 index f9f08eb221..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc.py +++ /dev/null @@ -1,381 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1beta1.types import index -from google.cloud.aiplatform_v1beta1.types import index_service -from google.longrunning import operations_pb2 # type: ignore -from .base import IndexServiceTransport, DEFAULT_CLIENT_INFO - - -class IndexServiceGrpcTransport(IndexServiceTransport): - """gRPC backend transport for IndexService. - - A service for creating and managing Vertex AI's Index - resources. 
- - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
- If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. 
This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. 
- """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_index(self) -> Callable[ - [index_service.CreateIndexRequest], - operations_pb2.Operation]: - r"""Return a callable for the create index method over gRPC. - - Creates an Index. - - Returns: - Callable[[~.CreateIndexRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_index' not in self._stubs: - self._stubs['create_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexService/CreateIndex', - request_serializer=index_service.CreateIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_index'] - - @property - def get_index(self) -> Callable[ - [index_service.GetIndexRequest], - index.Index]: - r"""Return a callable for the get index method over gRPC. - - Gets an Index. - - Returns: - Callable[[~.GetIndexRequest], - ~.Index]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_index' not in self._stubs: - self._stubs['get_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexService/GetIndex', - request_serializer=index_service.GetIndexRequest.serialize, - response_deserializer=index.Index.deserialize, - ) - return self._stubs['get_index'] - - @property - def list_indexes(self) -> Callable[ - [index_service.ListIndexesRequest], - index_service.ListIndexesResponse]: - r"""Return a callable for the list indexes method over gRPC. - - Lists Indexes in a Location. - - Returns: - Callable[[~.ListIndexesRequest], - ~.ListIndexesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_indexes' not in self._stubs: - self._stubs['list_indexes'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexService/ListIndexes', - request_serializer=index_service.ListIndexesRequest.serialize, - response_deserializer=index_service.ListIndexesResponse.deserialize, - ) - return self._stubs['list_indexes'] - - @property - def update_index(self) -> Callable[ - [index_service.UpdateIndexRequest], - operations_pb2.Operation]: - r"""Return a callable for the update index method over gRPC. - - Updates an Index. - - Returns: - Callable[[~.UpdateIndexRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_index' not in self._stubs: - self._stubs['update_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexService/UpdateIndex', - request_serializer=index_service.UpdateIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_index'] - - @property - def delete_index(self) -> Callable[ - [index_service.DeleteIndexRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete index method over gRPC. - - Deletes an Index. An Index can only be deleted when all its - [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes] - had been undeployed. - - Returns: - Callable[[~.DeleteIndexRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_index' not in self._stubs: - self._stubs['delete_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexService/DeleteIndex', - request_serializer=index_service.DeleteIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_index'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'IndexServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py deleted file mode 100644 index 99fcddf464..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/index_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,385 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import index -from google.cloud.aiplatform_v1beta1.types import index_service -from google.longrunning import operations_pb2 # type: ignore -from .base import IndexServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import IndexServiceGrpcTransport - - -class IndexServiceGrpcAsyncIOTransport(IndexServiceTransport): - """gRPC AsyncIO backend transport for IndexService. - - A service for creating and managing Vertex AI's Index - resources. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. 
- google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - 
("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_index(self) -> Callable[ - [index_service.CreateIndexRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create index method over gRPC. - - Creates an Index. - - Returns: - Callable[[~.CreateIndexRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_index' not in self._stubs: - self._stubs['create_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexService/CreateIndex', - request_serializer=index_service.CreateIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_index'] - - @property - def get_index(self) -> Callable[ - [index_service.GetIndexRequest], - Awaitable[index.Index]]: - r"""Return a callable for the get index method over gRPC. - - Gets an Index. - - Returns: - Callable[[~.GetIndexRequest], - Awaitable[~.Index]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_index' not in self._stubs: - self._stubs['get_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexService/GetIndex', - request_serializer=index_service.GetIndexRequest.serialize, - response_deserializer=index.Index.deserialize, - ) - return self._stubs['get_index'] - - @property - def list_indexes(self) -> Callable[ - [index_service.ListIndexesRequest], - Awaitable[index_service.ListIndexesResponse]]: - r"""Return a callable for the list indexes method over gRPC. - - Lists Indexes in a Location. - - Returns: - Callable[[~.ListIndexesRequest], - Awaitable[~.ListIndexesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_indexes' not in self._stubs: - self._stubs['list_indexes'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexService/ListIndexes', - request_serializer=index_service.ListIndexesRequest.serialize, - response_deserializer=index_service.ListIndexesResponse.deserialize, - ) - return self._stubs['list_indexes'] - - @property - def update_index(self) -> Callable[ - [index_service.UpdateIndexRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the update index method over gRPC. - - Updates an Index. - - Returns: - Callable[[~.UpdateIndexRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_index' not in self._stubs: - self._stubs['update_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexService/UpdateIndex', - request_serializer=index_service.UpdateIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_index'] - - @property - def delete_index(self) -> Callable[ - [index_service.DeleteIndexRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete index method over gRPC. - - Deletes an Index. An Index can only be deleted when all its - [DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes] - had been undeployed. - - Returns: - Callable[[~.DeleteIndexRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_index' not in self._stubs: - self._stubs['delete_index'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.IndexService/DeleteIndex', - request_serializer=index_service.DeleteIndexRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_index'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'IndexServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/__init__.py deleted file mode 100644 index 817e1b49e2..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import JobServiceClient -from .async_client import JobServiceAsyncClient - -__all__ = ( - 'JobServiceClient', - 'JobServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py deleted file mode 100644 index 4bc4bddbf8..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/async_client.py +++ /dev/null @@ -1,2649 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.job_service import pagers -from google.cloud.aiplatform_v1beta1.types import batch_prediction_job -from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job -from google.cloud.aiplatform_v1beta1.types import completion_stats -from google.cloud.aiplatform_v1beta1.types import custom_job -from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1beta1.types import data_labeling_job -from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import explanation -from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job -from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job -from google.cloud.aiplatform_v1beta1.types import io -from google.cloud.aiplatform_v1beta1.types import job_service -from google.cloud.aiplatform_v1beta1.types import job_state -from google.cloud.aiplatform_v1beta1.types import machine_resources -from 
google.cloud.aiplatform_v1beta1.types import manual_batch_tuning_parameters -from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job -from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job -from google.cloud.aiplatform_v1beta1.types import model_monitoring -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.cloud.aiplatform_v1beta1.types import study -from google.cloud.aiplatform_v1beta1.types import unmanaged_container_model -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from google.type import money_pb2 # type: ignore -from .transports.base import JobServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport -from .client import JobServiceClient - - -class JobServiceAsyncClient: - """A service for creating and managing Vertex AI's jobs.""" - - _client: JobServiceClient - - DEFAULT_ENDPOINT = JobServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = JobServiceClient.DEFAULT_MTLS_ENDPOINT - - batch_prediction_job_path = staticmethod(JobServiceClient.batch_prediction_job_path) - parse_batch_prediction_job_path = staticmethod(JobServiceClient.parse_batch_prediction_job_path) - custom_job_path = staticmethod(JobServiceClient.custom_job_path) - parse_custom_job_path = staticmethod(JobServiceClient.parse_custom_job_path) - data_labeling_job_path = staticmethod(JobServiceClient.data_labeling_job_path) - parse_data_labeling_job_path = staticmethod(JobServiceClient.parse_data_labeling_job_path) - dataset_path = staticmethod(JobServiceClient.dataset_path) - parse_dataset_path = 
staticmethod(JobServiceClient.parse_dataset_path) - endpoint_path = staticmethod(JobServiceClient.endpoint_path) - parse_endpoint_path = staticmethod(JobServiceClient.parse_endpoint_path) - hyperparameter_tuning_job_path = staticmethod(JobServiceClient.hyperparameter_tuning_job_path) - parse_hyperparameter_tuning_job_path = staticmethod(JobServiceClient.parse_hyperparameter_tuning_job_path) - model_path = staticmethod(JobServiceClient.model_path) - parse_model_path = staticmethod(JobServiceClient.parse_model_path) - model_deployment_monitoring_job_path = staticmethod(JobServiceClient.model_deployment_monitoring_job_path) - parse_model_deployment_monitoring_job_path = staticmethod(JobServiceClient.parse_model_deployment_monitoring_job_path) - network_path = staticmethod(JobServiceClient.network_path) - parse_network_path = staticmethod(JobServiceClient.parse_network_path) - tensorboard_path = staticmethod(JobServiceClient.tensorboard_path) - parse_tensorboard_path = staticmethod(JobServiceClient.parse_tensorboard_path) - trial_path = staticmethod(JobServiceClient.trial_path) - parse_trial_path = staticmethod(JobServiceClient.parse_trial_path) - common_billing_account_path = staticmethod(JobServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(JobServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(JobServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(JobServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(JobServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(JobServiceClient.parse_common_organization_path) - common_project_path = staticmethod(JobServiceClient.common_project_path) - parse_common_project_path = staticmethod(JobServiceClient.parse_common_project_path) - common_location_path = staticmethod(JobServiceClient.common_location_path) - parse_common_location_path = 
staticmethod(JobServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - JobServiceAsyncClient: The constructed client. - """ - return JobServiceClient.from_service_account_info.__func__(JobServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - JobServiceAsyncClient: The constructed client. - """ - return JobServiceClient.from_service_account_file.__func__(JobServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> JobServiceTransport: - """Returns the transport used by the client instance. - - Returns: - JobServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(JobServiceClient).get_transport_class, type(JobServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, JobServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the job service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.JobServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = JobServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_custom_job(self, - request: Union[job_service.CreateCustomJobRequest, dict] = None, - *, - parent: str = None, - custom_job: gca_custom_job.CustomJob = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_custom_job.CustomJob: - r"""Creates a CustomJob. A created CustomJob right away - will be attempted to be run. 
- - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateCustomJobRequest, dict]): - The request object. Request message for - [JobService.CreateCustomJob][google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob]. - parent (:class:`str`): - Required. The resource name of the Location to create - the CustomJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - custom_job (:class:`google.cloud.aiplatform_v1beta1.types.CustomJob`): - Required. The CustomJob to create. - This corresponds to the ``custom_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.CustomJob: - Represents a job that runs custom - workloads such as a Docker container or - a Python package. A CustomJob can have - multiple worker pools and each worker - pool can have its own machine and input - spec. A CustomJob will be cleaned up - once the job enters terminal state - (failed or succeeded). - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, custom_job]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CreateCustomJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - if custom_job is not None: - request.custom_job = custom_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_custom_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_custom_job(self, - request: Union[job_service.GetCustomJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> custom_job.CustomJob: - r"""Gets a CustomJob. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetCustomJobRequest, dict]): - The request object. Request message for - [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob]. - name (:class:`str`): - Required. The name of the CustomJob resource. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.CustomJob: - Represents a job that runs custom - workloads such as a Docker container or - a Python package. 
A CustomJob can have - multiple worker pools and each worker - pool can have its own machine and input - spec. A CustomJob will be cleaned up - once the job enters terminal state - (failed or succeeded). - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.GetCustomJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_custom_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_custom_jobs(self, - request: Union[job_service.ListCustomJobsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListCustomJobsAsyncPager: - r"""Lists CustomJobs in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest, dict]): - The request object. Request message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs]. - parent (:class:`str`): - Required. 
The resource name of the Location to list the - CustomJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListCustomJobsAsyncPager: - Response message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.ListCustomJobsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_custom_jobs, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListCustomJobsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_custom_job(self, - request: Union[job_service.DeleteCustomJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a CustomJob. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteCustomJobRequest, dict]): - The request object. Request message for - [JobService.DeleteCustomJob][google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob]. - name (:class:`str`): - Required. The name of the CustomJob resource to be - deleted. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. 
For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.DeleteCustomJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_custom_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def cancel_custom_job(self, - request: Union[job_service.CancelCustomJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a CustomJob. Starts asynchronous cancellation on the - CustomJob. 
The server makes a best effort to cancel the job, but - success is not guaranteed. Clients can use - [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On successful - cancellation, the CustomJob is not deleted; instead it becomes a - job with a - [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] - is set to ``CANCELLED``. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CancelCustomJobRequest, dict]): - The request object. Request message for - [JobService.CancelCustomJob][google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob]. - name (:class:`str`): - Required. The name of the CustomJob to cancel. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CancelCustomJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.cancel_custom_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def create_data_labeling_job(self, - request: Union[job_service.CreateDataLabelingJobRequest, dict] = None, - *, - parent: str = None, - data_labeling_job: gca_data_labeling_job.DataLabelingJob = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_data_labeling_job.DataLabelingJob: - r"""Creates a DataLabelingJob. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateDataLabelingJobRequest, dict]): - The request object. Request message for - [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CreateDataLabelingJob]. - parent (:class:`str`): - Required. The parent of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - data_labeling_job (:class:`google.cloud.aiplatform_v1beta1.types.DataLabelingJob`): - Required. The DataLabelingJob to - create. - - This corresponds to the ``data_labeling_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.DataLabelingJob: - DataLabelingJob is used to trigger a - human labeling job on unlabeled data - from the following Dataset: - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, data_labeling_job]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CreateDataLabelingJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if data_labeling_job is not None: - request.data_labeling_job = data_labeling_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_data_labeling_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_data_labeling_job(self, - request: Union[job_service.GetDataLabelingJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> data_labeling_job.DataLabelingJob: - r"""Gets a DataLabelingJob. 
- - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetDataLabelingJobRequest, dict]): - The request object. Request message for - [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.GetDataLabelingJob]. - name (:class:`str`): - Required. The name of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.DataLabelingJob: - DataLabelingJob is used to trigger a - human labeling job on unlabeled data - from the following Dataset: - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.GetDataLabelingJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_data_labeling_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_data_labeling_jobs(self, - request: Union[job_service.ListDataLabelingJobsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDataLabelingJobsAsyncPager: - r"""Lists DataLabelingJobs in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest, dict]): - The request object. Request message for - [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. - parent (:class:`str`): - Required. The parent of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListDataLabelingJobsAsyncPager: - Response message for - [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.ListDataLabelingJobsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_data_labeling_jobs, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListDataLabelingJobsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_data_labeling_job(self, - request: Union[job_service.DeleteDataLabelingJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a DataLabelingJob. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteDataLabelingJobRequest, dict]): - The request object. Request message for - [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob]. - name (:class:`str`): - Required. The name of the DataLabelingJob to be deleted. 
- Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.DeleteDataLabelingJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_data_labeling_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def cancel_data_labeling_job(self, - request: Union[job_service.CancelDataLabelingJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a DataLabelingJob. Success of cancellation is - not guaranteed. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CancelDataLabelingJobRequest, dict]): - The request object. Request message for - [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CancelDataLabelingJob]. - name (:class:`str`): - Required. The name of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CancelDataLabelingJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.cancel_data_labeling_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def create_hyperparameter_tuning_job(self, - request: Union[job_service.CreateHyperparameterTuningJobRequest, dict] = None, - *, - parent: str = None, - hyperparameter_tuning_job: gca_hyperparameter_tuning_job.HyperparameterTuningJob = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_hyperparameter_tuning_job.HyperparameterTuningJob: - r"""Creates a HyperparameterTuningJob - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateHyperparameterTuningJobRequest, dict]): - The request object. Request message for - [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob]. - parent (:class:`str`): - Required. The resource name of the Location to create - the HyperparameterTuningJob in. 
Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - hyperparameter_tuning_job (:class:`google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob`): - Required. The HyperparameterTuningJob - to create. - - This corresponds to the ``hyperparameter_tuning_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob: - Represents a HyperparameterTuningJob. - A HyperparameterTuningJob has a Study - specification and multiple CustomJobs - with identical CustomJob specification. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, hyperparameter_tuning_job]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CreateHyperparameterTuningJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if hyperparameter_tuning_job is not None: - request.hyperparameter_tuning_job = hyperparameter_tuning_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_hyperparameter_tuning_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_hyperparameter_tuning_job(self, - request: Union[job_service.GetHyperparameterTuningJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> hyperparameter_tuning_job.HyperparameterTuningJob: - r"""Gets a HyperparameterTuningJob - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetHyperparameterTuningJobRequest, dict]): - The request object. Request message for - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob]. - name (:class:`str`): - Required. The name of the HyperparameterTuningJob - resource. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob: - Represents a HyperparameterTuningJob. - A HyperparameterTuningJob has a Study - specification and multiple CustomJobs - with identical CustomJob specification. 
- - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.GetHyperparameterTuningJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_hyperparameter_tuning_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_hyperparameter_tuning_jobs(self, - request: Union[job_service.ListHyperparameterTuningJobsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListHyperparameterTuningJobsAsyncPager: - r"""Lists HyperparameterTuningJobs in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest, dict]): - The request object. Request message for - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs]. - parent (:class:`str`): - Required. The resource name of the Location to list the - HyperparameterTuningJobs from. 
Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListHyperparameterTuningJobsAsyncPager: - Response message for - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.ListHyperparameterTuningJobsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_hyperparameter_tuning_jobs, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListHyperparameterTuningJobsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_hyperparameter_tuning_job(self, - request: Union[job_service.DeleteHyperparameterTuningJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a HyperparameterTuningJob. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteHyperparameterTuningJobRequest, dict]): - The request object. Request message for - [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob]. - name (:class:`str`): - Required. The name of the HyperparameterTuningJob - resource to be deleted. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. 
A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.DeleteHyperparameterTuningJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_hyperparameter_tuning_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
- return response - - async def cancel_hyperparameter_tuning_job(self, - request: Union[job_service.CancelHyperparameterTuningJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a HyperparameterTuningJob. Starts asynchronous - cancellation on the HyperparameterTuningJob. The server makes a - best effort to cancel the job, but success is not guaranteed. - Clients can use - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On successful - cancellation, the HyperparameterTuningJob is not deleted; - instead it becomes a job with a - [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state] - is set to ``CANCELLED``. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CancelHyperparameterTuningJobRequest, dict]): - The request object. Request message for - [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob]. - name (:class:`str`): - Required. The name of the HyperparameterTuningJob to - cancel. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CancelHyperparameterTuningJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.cancel_hyperparameter_tuning_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def create_batch_prediction_job(self, - request: Union[job_service.CreateBatchPredictionJobRequest, dict] = None, - *, - parent: str = None, - batch_prediction_job: gca_batch_prediction_job.BatchPredictionJob = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_batch_prediction_job.BatchPredictionJob: - r"""Creates a BatchPredictionJob. A BatchPredictionJob - once created will right away be attempted to start. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateBatchPredictionJobRequest, dict]): - The request object. 
Request message for - [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob]. - parent (:class:`str`): - Required. The resource name of the Location to create - the BatchPredictionJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - batch_prediction_job (:class:`google.cloud.aiplatform_v1beta1.types.BatchPredictionJob`): - Required. The BatchPredictionJob to - create. - - This corresponds to the ``batch_prediction_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.BatchPredictionJob: - A job that uses a [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to produce predictions - on multiple [input - instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. - If predictions for significant portion of the - instances fail, the job may finish without attempting - predictions for all remaining instances. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, batch_prediction_job]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CreateBatchPredictionJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - if batch_prediction_job is not None: - request.batch_prediction_job = batch_prediction_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_batch_prediction_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_batch_prediction_job(self, - request: Union[job_service.GetBatchPredictionJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> batch_prediction_job.BatchPredictionJob: - r"""Gets a BatchPredictionJob - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetBatchPredictionJobRequest, dict]): - The request object. Request message for - [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob]. - name (:class:`str`): - Required. The name of the BatchPredictionJob resource. - Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.types.BatchPredictionJob: - A job that uses a [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to produce predictions - on multiple [input - instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. - If predictions for significant portion of the - instances fail, the job may finish without attempting - predictions for all remaining instances. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.GetBatchPredictionJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_batch_prediction_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def list_batch_prediction_jobs(self, - request: Union[job_service.ListBatchPredictionJobsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListBatchPredictionJobsAsyncPager: - r"""Lists BatchPredictionJobs in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest, dict]): - The request object. Request message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs]. - parent (:class:`str`): - Required. The resource name of the Location to list the - BatchPredictionJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListBatchPredictionJobsAsyncPager: - Response message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.ListBatchPredictionJobsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_batch_prediction_jobs, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListBatchPredictionJobsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_batch_prediction_job(self, - request: Union[job_service.DeleteBatchPredictionJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a BatchPredictionJob. Can only be called on - jobs that already finished. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteBatchPredictionJobRequest, dict]): - The request object. Request message for - [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob]. - name (:class:`str`): - Required. 
The name of the BatchPredictionJob resource to - be deleted. Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.DeleteBatchPredictionJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_batch_prediction_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def cancel_batch_prediction_job(self, - request: Union[job_service.CancelBatchPredictionJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a BatchPredictionJob. - - Starts asynchronous cancellation on the BatchPredictionJob. The - server makes the best effort to cancel the job, but success is - not guaranteed. Clients can use - [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On a successful - cancellation, the BatchPredictionJob is not deleted;instead its - [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state] - is set to ``CANCELLED``. Any files already outputted by the job - are not deleted. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CancelBatchPredictionJobRequest, dict]): - The request object. Request message for - [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob]. 
- name (:class:`str`): - Required. The name of the BatchPredictionJob to cancel. - Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CancelBatchPredictionJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.cancel_batch_prediction_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def create_model_deployment_monitoring_job(self, - request: Union[job_service.CreateModelDeploymentMonitoringJobRequest, dict] = None, - *, - parent: str = None, - model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob: - r"""Creates a ModelDeploymentMonitoringJob. It will run - periodically on a configured interval. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateModelDeploymentMonitoringJobRequest, dict]): - The request object. Request message for - [JobService.CreateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.CreateModelDeploymentMonitoringJob]. - parent (:class:`str`): - Required. The parent of the - ModelDeploymentMonitoringJob. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - model_deployment_monitoring_job (:class:`google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob`): - Required. The - ModelDeploymentMonitoringJob to create - - This corresponds to the ``model_deployment_monitoring_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob: - Represents a job that runs - periodically to monitor the deployed - models in an endpoint. 
It will analyze - the logged training & prediction data to - detect any abnormal behaviors. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, model_deployment_monitoring_job]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.CreateModelDeploymentMonitoringJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if model_deployment_monitoring_job is not None: - request.model_deployment_monitoring_job = model_deployment_monitoring_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_model_deployment_monitoring_job, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def search_model_deployment_monitoring_stats_anomalies(self, - request: Union[job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest, dict] = None, - *, - model_deployment_monitoring_job: str = None, - deployed_model_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager: - r"""Searches Model Monitoring Statistics generated within - a given time window. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest, dict]): - The request object. Request message for - [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. - model_deployment_monitoring_job (:class:`str`): - Required. ModelDeploymentMonitoring Job resource name. - Format: - \`projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job} - - This corresponds to the ``model_deployment_monitoring_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_model_id (:class:`str`): - Required. The DeployedModel ID of the - [ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. - - This corresponds to the ``deployed_model_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager: - Response message for - [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([model_deployment_monitoring_job, deployed_model_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if model_deployment_monitoring_job is not None: - request.model_deployment_monitoring_job = model_deployment_monitoring_job - if deployed_model_id is not None: - request.deployed_model_id = deployed_model_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.search_model_deployment_monitoring_stats_anomalies, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("model_deployment_monitoring_job", request.model_deployment_monitoring_job), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. 
- response = pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_model_deployment_monitoring_job(self, - request: Union[job_service.GetModelDeploymentMonitoringJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model_deployment_monitoring_job.ModelDeploymentMonitoringJob: - r"""Gets a ModelDeploymentMonitoringJob. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetModelDeploymentMonitoringJobRequest, dict]): - The request object. Request message for - [JobService.GetModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.GetModelDeploymentMonitoringJob]. - name (:class:`str`): - Required. The resource name of the - ModelDeploymentMonitoringJob. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob: - Represents a job that runs - periodically to monitor the deployed - models in an endpoint. It will analyze - the logged training & prediction data to - detect any abnormal behaviors. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.GetModelDeploymentMonitoringJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_model_deployment_monitoring_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_model_deployment_monitoring_jobs(self, - request: Union[job_service.ListModelDeploymentMonitoringJobsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelDeploymentMonitoringJobsAsyncPager: - r"""Lists ModelDeploymentMonitoringJobs in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest, dict]): - The request object. Request message for - [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. - parent (:class:`str`): - Required. The parent of the - ModelDeploymentMonitoringJob. 
Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListModelDeploymentMonitoringJobsAsyncPager: - Response message for - [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.ListModelDeploymentMonitoringJobsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_model_deployment_monitoring_jobs, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListModelDeploymentMonitoringJobsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_model_deployment_monitoring_job(self, - request: Union[job_service.UpdateModelDeploymentMonitoringJobRequest, dict] = None, - *, - model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Updates a ModelDeploymentMonitoringJob. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateModelDeploymentMonitoringJobRequest, dict]): - The request object. Request message for - [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob]. - model_deployment_monitoring_job (:class:`google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob`): - Required. The model monitoring - configuration which replaces the - resource on the server. - - This corresponds to the ``model_deployment_monitoring_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The update mask is used to specify the fields - to be overwritten in the ModelDeploymentMonitoringJob - resource by the update. The fields specified in the - update_mask are relative to the resource, not the full - request. A field will be overwritten if it is in the - mask. 
If the user does not provide a mask then only the - non-empty fields present in the request will be - overwritten. Set the update_mask to ``*`` to override - all fields. For the objective config, the user can - either provide the update mask for - model_deployment_monitoring_objective_configs or any - combination of its nested fields, such as: - model_deployment_monitoring_objective_configs.objective_config.training_dataset. - - Updatable fields: - - - ``display_name`` - - ``model_deployment_monitoring_schedule_config`` - - ``model_monitoring_alert_config`` - - ``logging_sampling_strategy`` - - ``labels`` - - ``log_ttl`` - - ``enable_monitoring_pipeline_logs`` . and - - ``model_deployment_monitoring_objective_configs`` . - or - - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset`` - - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config`` - - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob` Represents a job that runs periodically to monitor the deployed models in an - endpoint. It will analyze the logged training & - prediction data to detect any abnormal behaviors. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([model_deployment_monitoring_job, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.UpdateModelDeploymentMonitoringJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if model_deployment_monitoring_job is not None: - request.model_deployment_monitoring_job = model_deployment_monitoring_job - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_model_deployment_monitoring_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("model_deployment_monitoring_job.name", request.model_deployment_monitoring_job.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, - metadata_type=job_service.UpdateModelDeploymentMonitoringJobOperationMetadata, - ) - - # Done; return the response. 
- return response - - async def delete_model_deployment_monitoring_job(self, - request: Union[job_service.DeleteModelDeploymentMonitoringJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a ModelDeploymentMonitoringJob. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteModelDeploymentMonitoringJobRequest, dict]): - The request object. Request message for - [JobService.DeleteModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.DeleteModelDeploymentMonitoringJob]. - name (:class:`str`): - Required. The resource name of the model monitoring job - to delete. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.DeleteModelDeploymentMonitoringJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_model_deployment_monitoring_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def pause_model_deployment_monitoring_job(self, - request: Union[job_service.PauseModelDeploymentMonitoringJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Pauses a ModelDeploymentMonitoringJob. If the job is running, - the server makes a best effort to cancel the job. Will mark - [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state] - to 'PAUSED'. 
- - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.PauseModelDeploymentMonitoringJobRequest, dict]): - The request object. Request message for - [JobService.PauseModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.PauseModelDeploymentMonitoringJob]. - name (:class:`str`): - Required. The resource name of the - ModelDeploymentMonitoringJob to pause. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.PauseModelDeploymentMonitoringJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.pause_model_deployment_monitoring_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def resume_model_deployment_monitoring_job(self, - request: Union[job_service.ResumeModelDeploymentMonitoringJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Resumes a paused ModelDeploymentMonitoringJob. It - will start to run from next scheduled time. A deleted - ModelDeploymentMonitoringJob can't be resumed. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ResumeModelDeploymentMonitoringJobRequest, dict]): - The request object. Request message for - [JobService.ResumeModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.ResumeModelDeploymentMonitoringJob]. - name (:class:`str`): - Required. The resource name of the - ModelDeploymentMonitoringJob to resume. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = job_service.ResumeModelDeploymentMonitoringJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.resume_model_deployment_monitoring_job, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "JobServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/client.py deleted file mode 100644 index 429dffac80..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/client.py +++ /dev/null @@ -1,2937 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.job_service import pagers -from google.cloud.aiplatform_v1beta1.types import batch_prediction_job -from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job -from google.cloud.aiplatform_v1beta1.types import completion_stats -from google.cloud.aiplatform_v1beta1.types import custom_job -from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1beta1.types import data_labeling_job -from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import explanation -from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job -from google.cloud.aiplatform_v1beta1.types import 
hyperparameter_tuning_job as gca_hyperparameter_tuning_job -from google.cloud.aiplatform_v1beta1.types import io -from google.cloud.aiplatform_v1beta1.types import job_service -from google.cloud.aiplatform_v1beta1.types import job_state -from google.cloud.aiplatform_v1beta1.types import machine_resources -from google.cloud.aiplatform_v1beta1.types import manual_batch_tuning_parameters -from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job -from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job -from google.cloud.aiplatform_v1beta1.types import model_monitoring -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.cloud.aiplatform_v1beta1.types import study -from google.cloud.aiplatform_v1beta1.types import unmanaged_container_model -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from google.type import money_pb2 # type: ignore -from .transports.base import JobServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import JobServiceGrpcTransport -from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport - - -class JobServiceClientMeta(type): - """Metaclass for the JobService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. 
- """ - _transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] - _transport_registry["grpc"] = JobServiceGrpcTransport - _transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[JobServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class JobServiceClient(metaclass=JobServiceClientMeta): - """A service for creating and managing Vertex AI's jobs.""" - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
- ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - JobServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - JobServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> JobServiceTransport: - """Returns the transport used by the client instance. - - Returns: - JobServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def batch_prediction_job_path(project: str,location: str,batch_prediction_job: str,) -> str: - """Returns a fully-qualified batch_prediction_job string.""" - return "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format(project=project, location=location, batch_prediction_job=batch_prediction_job, ) - - @staticmethod - def parse_batch_prediction_job_path(path: str) -> Dict[str,str]: - """Parses a batch_prediction_job path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/batchPredictionJobs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def custom_job_path(project: str,location: str,custom_job: str,) -> str: - """Returns a fully-qualified custom_job string.""" - return "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) - - @staticmethod - def parse_custom_job_path(path: str) -> Dict[str,str]: - """Parses a custom_job path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/customJobs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def data_labeling_job_path(project: str,location: str,data_labeling_job: str,) -> str: - """Returns a fully-qualified data_labeling_job string.""" - return "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format(project=project, location=location, data_labeling_job=data_labeling_job, ) - - @staticmethod - def parse_data_labeling_job_path(path: str) -> Dict[str,str]: - """Parses a data_labeling_job path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/dataLabelingJobs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def dataset_path(project: str,location: str,dataset: str,) -> str: - """Returns a fully-qualified dataset string.""" - return 
"projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - - @staticmethod - def parse_dataset_path(path: str) -> Dict[str,str]: - """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def endpoint_path(project: str,location: str,endpoint: str,) -> str: - """Returns a fully-qualified endpoint string.""" - return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - - @staticmethod - def parse_endpoint_path(path: str) -> Dict[str,str]: - """Parses a endpoint path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def hyperparameter_tuning_job_path(project: str,location: str,hyperparameter_tuning_job: str,) -> str: - """Returns a fully-qualified hyperparameter_tuning_job string.""" - return "projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}".format(project=project, location=location, hyperparameter_tuning_job=hyperparameter_tuning_job, ) - - @staticmethod - def parse_hyperparameter_tuning_job_path(path: str) -> Dict[str,str]: - """Parses a hyperparameter_tuning_job path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/hyperparameterTuningJobs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_path(project: str,location: str,model: str,) -> str: - """Returns a fully-qualified model string.""" - return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - - @staticmethod - def parse_model_path(path: str) -> Dict[str,str]: - """Parses a model path into its component segments.""" - m = 
re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_deployment_monitoring_job_path(project: str,location: str,model_deployment_monitoring_job: str,) -> str: - """Returns a fully-qualified model_deployment_monitoring_job string.""" - return "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format(project=project, location=location, model_deployment_monitoring_job=model_deployment_monitoring_job, ) - - @staticmethod - def parse_model_deployment_monitoring_job_path(path: str) -> Dict[str,str]: - """Parses a model_deployment_monitoring_job path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/modelDeploymentMonitoringJobs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def network_path(project: str,network: str,) -> str: - """Returns a fully-qualified network string.""" - return "projects/{project}/global/networks/{network}".format(project=project, network=network, ) - - @staticmethod - def parse_network_path(path: str) -> Dict[str,str]: - """Parses a network path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/global/networks/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def tensorboard_path(project: str,location: str,tensorboard: str,) -> str: - """Returns a fully-qualified tensorboard string.""" - return "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(project=project, location=location, tensorboard=tensorboard, ) - - @staticmethod - def parse_tensorboard_path(path: str) -> Dict[str,str]: - """Parses a tensorboard path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def trial_path(project: str,location: str,study: str,trial: str,) -> str: - """Returns a fully-qualified trial 
string.""" - return "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, ) - - @staticmethod - def parse_trial_path(path: str) -> Dict[str,str]: - """Parses a trial path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/studies/(?P.+?)/trials/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its 
component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, JobServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the job service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, JobServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. 
- (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. 
- if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, JobServiceTransport): - # transport is a JobServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def create_custom_job(self, - request: Union[job_service.CreateCustomJobRequest, dict] = None, - *, - parent: str = None, - custom_job: gca_custom_job.CustomJob = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_custom_job.CustomJob: - r"""Creates a CustomJob. A created CustomJob right away - will be attempted to be run. 
- - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateCustomJobRequest, dict]): - The request object. Request message for - [JobService.CreateCustomJob][google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob]. - parent (str): - Required. The resource name of the Location to create - the CustomJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - custom_job (google.cloud.aiplatform_v1beta1.types.CustomJob): - Required. The CustomJob to create. - This corresponds to the ``custom_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.CustomJob: - Represents a job that runs custom - workloads such as a Docker container or - a Python package. A CustomJob can have - multiple worker pools and each worker - pool can have its own machine and input - spec. A CustomJob will be cleaned up - once the job enters terminal state - (failed or succeeded). - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, custom_job]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CreateCustomJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, job_service.CreateCustomJobRequest): - request = job_service.CreateCustomJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if custom_job is not None: - request.custom_job = custom_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_custom_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_custom_job(self, - request: Union[job_service.GetCustomJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> custom_job.CustomJob: - r"""Gets a CustomJob. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetCustomJobRequest, dict]): - The request object. Request message for - [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob]. - name (str): - Required. The name of the CustomJob resource. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.types.CustomJob: - Represents a job that runs custom - workloads such as a Docker container or - a Python package. A CustomJob can have - multiple worker pools and each worker - pool can have its own machine and input - spec. A CustomJob will be cleaned up - once the job enters terminal state - (failed or succeeded). - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.GetCustomJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.GetCustomJobRequest): - request = job_service.GetCustomJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_custom_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def list_custom_jobs(self, - request: Union[job_service.ListCustomJobsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListCustomJobsPager: - r"""Lists CustomJobs in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest, dict]): - The request object. Request message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs]. - parent (str): - Required. The resource name of the Location to list the - CustomJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListCustomJobsPager: - Response message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.ListCustomJobsRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.ListCustomJobsRequest): - request = job_service.ListCustomJobsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_custom_jobs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListCustomJobsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_custom_job(self, - request: Union[job_service.DeleteCustomJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a CustomJob. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteCustomJobRequest, dict]): - The request object. Request message for - [JobService.DeleteCustomJob][google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob]. - name (str): - Required. The name of the CustomJob resource to be - deleted. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.DeleteCustomJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.DeleteCustomJobRequest): - request = job_service.DeleteCustomJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_custom_job] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def cancel_custom_job(self, - request: Union[job_service.CancelCustomJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a CustomJob. Starts asynchronous cancellation on the - CustomJob. The server makes a best effort to cancel the job, but - success is not guaranteed. Clients can use - [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On successful - cancellation, the CustomJob is not deleted; instead it becomes a - job with a - [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] - is set to ``CANCELLED``. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CancelCustomJobRequest, dict]): - The request object. Request message for - [JobService.CancelCustomJob][google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob]. - name (str): - Required. The name of the CustomJob to cancel. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CancelCustomJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.CancelCustomJobRequest): - request = job_service.CancelCustomJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.cancel_custom_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def create_data_labeling_job(self, - request: Union[job_service.CreateDataLabelingJobRequest, dict] = None, - *, - parent: str = None, - data_labeling_job: gca_data_labeling_job.DataLabelingJob = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_data_labeling_job.DataLabelingJob: - r"""Creates a DataLabelingJob. 
- - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateDataLabelingJobRequest, dict]): - The request object. Request message for - [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CreateDataLabelingJob]. - parent (str): - Required. The parent of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - data_labeling_job (google.cloud.aiplatform_v1beta1.types.DataLabelingJob): - Required. The DataLabelingJob to - create. - - This corresponds to the ``data_labeling_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.DataLabelingJob: - DataLabelingJob is used to trigger a - human labeling job on unlabeled data - from the following Dataset: - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, data_labeling_job]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CreateDataLabelingJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, job_service.CreateDataLabelingJobRequest): - request = job_service.CreateDataLabelingJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if data_labeling_job is not None: - request.data_labeling_job = data_labeling_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_data_labeling_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_data_labeling_job(self, - request: Union[job_service.GetDataLabelingJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> data_labeling_job.DataLabelingJob: - r"""Gets a DataLabelingJob. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetDataLabelingJobRequest, dict]): - The request object. Request message for - [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.GetDataLabelingJob]. - name (str): - Required. The name of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.DataLabelingJob: - DataLabelingJob is used to trigger a - human labeling job on unlabeled data - from the following Dataset: - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.GetDataLabelingJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.GetDataLabelingJobRequest): - request = job_service.GetDataLabelingJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_data_labeling_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def list_data_labeling_jobs(self, - request: Union[job_service.ListDataLabelingJobsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListDataLabelingJobsPager: - r"""Lists DataLabelingJobs in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest, dict]): - The request object. Request message for - [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. - parent (str): - Required. The parent of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListDataLabelingJobsPager: - Response message for - [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.ListDataLabelingJobsRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.ListDataLabelingJobsRequest): - request = job_service.ListDataLabelingJobsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_data_labeling_jobs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListDataLabelingJobsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_data_labeling_job(self, - request: Union[job_service.DeleteDataLabelingJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a DataLabelingJob. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteDataLabelingJobRequest, dict]): - The request object. Request message for - [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob]. - name (str): - Required. The name of the DataLabelingJob to be deleted. 
- Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.DeleteDataLabelingJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.DeleteDataLabelingJobRequest): - request = job_service.DeleteDataLabelingJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_data_labeling_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def cancel_data_labeling_job(self, - request: Union[job_service.CancelDataLabelingJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a DataLabelingJob. Success of cancellation is - not guaranteed. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CancelDataLabelingJobRequest, dict]): - The request object. Request message for - [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CancelDataLabelingJob]. - name (str): - Required. The name of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CancelDataLabelingJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.CancelDataLabelingJobRequest): - request = job_service.CancelDataLabelingJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.cancel_data_labeling_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def create_hyperparameter_tuning_job(self, - request: Union[job_service.CreateHyperparameterTuningJobRequest, dict] = None, - *, - parent: str = None, - hyperparameter_tuning_job: gca_hyperparameter_tuning_job.HyperparameterTuningJob = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_hyperparameter_tuning_job.HyperparameterTuningJob: - r"""Creates a HyperparameterTuningJob - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateHyperparameterTuningJobRequest, dict]): - The request object. 
Request message for - [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob]. - parent (str): - Required. The resource name of the Location to create - the HyperparameterTuningJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - hyperparameter_tuning_job (google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob): - Required. The HyperparameterTuningJob - to create. - - This corresponds to the ``hyperparameter_tuning_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob: - Represents a HyperparameterTuningJob. - A HyperparameterTuningJob has a Study - specification and multiple CustomJobs - with identical CustomJob specification. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, hyperparameter_tuning_job]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CreateHyperparameterTuningJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, job_service.CreateHyperparameterTuningJobRequest): - request = job_service.CreateHyperparameterTuningJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if hyperparameter_tuning_job is not None: - request.hyperparameter_tuning_job = hyperparameter_tuning_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_hyperparameter_tuning_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_hyperparameter_tuning_job(self, - request: Union[job_service.GetHyperparameterTuningJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> hyperparameter_tuning_job.HyperparameterTuningJob: - r"""Gets a HyperparameterTuningJob - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetHyperparameterTuningJobRequest, dict]): - The request object. Request message for - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob]. - name (str): - Required. The name of the HyperparameterTuningJob - resource. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob: - Represents a HyperparameterTuningJob. - A HyperparameterTuningJob has a Study - specification and multiple CustomJobs - with identical CustomJob specification. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.GetHyperparameterTuningJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.GetHyperparameterTuningJobRequest): - request = job_service.GetHyperparameterTuningJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_hyperparameter_tuning_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def list_hyperparameter_tuning_jobs(self, - request: Union[job_service.ListHyperparameterTuningJobsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListHyperparameterTuningJobsPager: - r"""Lists HyperparameterTuningJobs in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest, dict]): - The request object. Request message for - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs]. - parent (str): - Required. The resource name of the Location to list the - HyperparameterTuningJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListHyperparameterTuningJobsPager: - Response message for - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.ListHyperparameterTuningJobsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.ListHyperparameterTuningJobsRequest): - request = job_service.ListHyperparameterTuningJobsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_hyperparameter_tuning_jobs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListHyperparameterTuningJobsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_hyperparameter_tuning_job(self, - request: Union[job_service.DeleteHyperparameterTuningJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a HyperparameterTuningJob. 
- - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteHyperparameterTuningJobRequest, dict]): - The request object. Request message for - [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob]. - name (str): - Required. The name of the HyperparameterTuningJob - resource to be deleted. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.DeleteHyperparameterTuningJobRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.DeleteHyperparameterTuningJobRequest): - request = job_service.DeleteHyperparameterTuningJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_hyperparameter_tuning_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def cancel_hyperparameter_tuning_job(self, - request: Union[job_service.CancelHyperparameterTuningJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a HyperparameterTuningJob. Starts asynchronous - cancellation on the HyperparameterTuningJob. The server makes a - best effort to cancel the job, but success is not guaranteed. - Clients can use - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. 
On successful - cancellation, the HyperparameterTuningJob is not deleted; - instead it becomes a job with a - [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state] - is set to ``CANCELLED``. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CancelHyperparameterTuningJobRequest, dict]): - The request object. Request message for - [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob]. - name (str): - Required. The name of the HyperparameterTuningJob to - cancel. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CancelHyperparameterTuningJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, job_service.CancelHyperparameterTuningJobRequest): - request = job_service.CancelHyperparameterTuningJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.cancel_hyperparameter_tuning_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def create_batch_prediction_job(self, - request: Union[job_service.CreateBatchPredictionJobRequest, dict] = None, - *, - parent: str = None, - batch_prediction_job: gca_batch_prediction_job.BatchPredictionJob = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_batch_prediction_job.BatchPredictionJob: - r"""Creates a BatchPredictionJob. A BatchPredictionJob - once created will right away be attempted to start. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateBatchPredictionJobRequest, dict]): - The request object. Request message for - [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob]. - parent (str): - Required. The resource name of the Location to create - the BatchPredictionJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - batch_prediction_job (google.cloud.aiplatform_v1beta1.types.BatchPredictionJob): - Required. The BatchPredictionJob to - create. 
- - This corresponds to the ``batch_prediction_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.BatchPredictionJob: - A job that uses a [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to produce predictions - on multiple [input - instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. - If predictions for significant portion of the - instances fail, the job may finish without attempting - predictions for all remaining instances. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, batch_prediction_job]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CreateBatchPredictionJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.CreateBatchPredictionJobRequest): - request = job_service.CreateBatchPredictionJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if batch_prediction_job is not None: - request.batch_prediction_job = batch_prediction_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.create_batch_prediction_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_batch_prediction_job(self, - request: Union[job_service.GetBatchPredictionJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> batch_prediction_job.BatchPredictionJob: - r"""Gets a BatchPredictionJob - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetBatchPredictionJobRequest, dict]): - The request object. Request message for - [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob]. - name (str): - Required. The name of the BatchPredictionJob resource. - Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.BatchPredictionJob: - A job that uses a [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to produce predictions - on multiple [input - instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. 
- If predictions for significant portion of the - instances fail, the job may finish without attempting - predictions for all remaining instances. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.GetBatchPredictionJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.GetBatchPredictionJobRequest): - request = job_service.GetBatchPredictionJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_batch_prediction_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_batch_prediction_jobs(self, - request: Union[job_service.ListBatchPredictionJobsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListBatchPredictionJobsPager: - r"""Lists BatchPredictionJobs in a Location. 
- - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest, dict]): - The request object. Request message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs]. - parent (str): - Required. The resource name of the Location to list the - BatchPredictionJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListBatchPredictionJobsPager: - Response message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.ListBatchPredictionJobsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.ListBatchPredictionJobsRequest): - request = job_service.ListBatchPredictionJobsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_batch_prediction_jobs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListBatchPredictionJobsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_batch_prediction_job(self, - request: Union[job_service.DeleteBatchPredictionJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a BatchPredictionJob. Can only be called on - jobs that already finished. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteBatchPredictionJobRequest, dict]): - The request object. Request message for - [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob]. - name (str): - Required. The name of the BatchPredictionJob resource to - be deleted. Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.DeleteBatchPredictionJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.DeleteBatchPredictionJobRequest): - request = job_service.DeleteBatchPredictionJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_batch_prediction_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def cancel_batch_prediction_job(self, - request: Union[job_service.CancelBatchPredictionJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a BatchPredictionJob. - - Starts asynchronous cancellation on the BatchPredictionJob. The - server makes the best effort to cancel the job, but success is - not guaranteed. Clients can use - [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On a successful - cancellation, the BatchPredictionJob is not deleted;instead its - [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state] - is set to ``CANCELLED``. Any files already outputted by the job - are not deleted. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CancelBatchPredictionJobRequest, dict]): - The request object. Request message for - [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob]. - name (str): - Required. The name of the BatchPredictionJob to cancel. - Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CancelBatchPredictionJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.CancelBatchPredictionJobRequest): - request = job_service.CancelBatchPredictionJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.cancel_batch_prediction_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def create_model_deployment_monitoring_job(self, - request: Union[job_service.CreateModelDeploymentMonitoringJobRequest, dict] = None, - *, - parent: str = None, - model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob: - r"""Creates a ModelDeploymentMonitoringJob. It will run - periodically on a configured interval. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateModelDeploymentMonitoringJobRequest, dict]): - The request object. Request message for - [JobService.CreateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.CreateModelDeploymentMonitoringJob]. - parent (str): - Required. The parent of the - ModelDeploymentMonitoringJob. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - model_deployment_monitoring_job (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob): - Required. The - ModelDeploymentMonitoringJob to create - - This corresponds to the ``model_deployment_monitoring_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob: - Represents a job that runs - periodically to monitor the deployed - models in an endpoint. 
It will analyze - the logged training & prediction data to - detect any abnormal behaviors. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, model_deployment_monitoring_job]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.CreateModelDeploymentMonitoringJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.CreateModelDeploymentMonitoringJobRequest): - request = job_service.CreateModelDeploymentMonitoringJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if model_deployment_monitoring_job is not None: - request.model_deployment_monitoring_job = model_deployment_monitoring_job - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_model_deployment_monitoring_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def search_model_deployment_monitoring_stats_anomalies(self, - request: Union[job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest, dict] = None, - *, - model_deployment_monitoring_job: str = None, - deployed_model_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager: - r"""Searches Model Monitoring Statistics generated within - a given time window. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest, dict]): - The request object. Request message for - [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. - model_deployment_monitoring_job (str): - Required. ModelDeploymentMonitoring Job resource name. - Format: - \`projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job} - - This corresponds to the ``model_deployment_monitoring_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_model_id (str): - Required. The DeployedModel ID of the - [ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. - - This corresponds to the ``deployed_model_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager: - Response message for - [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([model_deployment_monitoring_job, deployed_model_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest): - request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if model_deployment_monitoring_job is not None: - request.model_deployment_monitoring_job = model_deployment_monitoring_job - if deployed_model_id is not None: - request.deployed_model_id = deployed_model_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.search_model_deployment_monitoring_stats_anomalies] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("model_deployment_monitoring_job", request.model_deployment_monitoring_job), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_model_deployment_monitoring_job(self, - request: Union[job_service.GetModelDeploymentMonitoringJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model_deployment_monitoring_job.ModelDeploymentMonitoringJob: - r"""Gets a ModelDeploymentMonitoringJob. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetModelDeploymentMonitoringJobRequest, dict]): - The request object. Request message for - [JobService.GetModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.GetModelDeploymentMonitoringJob]. - name (str): - Required. The resource name of the - ModelDeploymentMonitoringJob. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob: - Represents a job that runs - periodically to monitor the deployed - models in an endpoint. It will analyze - the logged training & prediction data to - detect any abnormal behaviors. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.GetModelDeploymentMonitoringJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.GetModelDeploymentMonitoringJobRequest): - request = job_service.GetModelDeploymentMonitoringJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_model_deployment_monitoring_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def list_model_deployment_monitoring_jobs(self, - request: Union[job_service.ListModelDeploymentMonitoringJobsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelDeploymentMonitoringJobsPager: - r"""Lists ModelDeploymentMonitoringJobs in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest, dict]): - The request object. Request message for - [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. - parent (str): - Required. The parent of the - ModelDeploymentMonitoringJob. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListModelDeploymentMonitoringJobsPager: - Response message for - [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.ListModelDeploymentMonitoringJobsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.ListModelDeploymentMonitoringJobsRequest): - request = job_service.ListModelDeploymentMonitoringJobsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_model_deployment_monitoring_jobs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListModelDeploymentMonitoringJobsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def update_model_deployment_monitoring_job(self, - request: Union[job_service.UpdateModelDeploymentMonitoringJobRequest, dict] = None, - *, - model_deployment_monitoring_job: gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Updates a ModelDeploymentMonitoringJob. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateModelDeploymentMonitoringJobRequest, dict]): - The request object. Request message for - [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob]. - model_deployment_monitoring_job (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob): - Required. The model monitoring - configuration which replaces the - resource on the server. - - This corresponds to the ``model_deployment_monitoring_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask is used to specify the fields - to be overwritten in the ModelDeploymentMonitoringJob - resource by the update. The fields specified in the - update_mask are relative to the resource, not the full - request. A field will be overwritten if it is in the - mask. If the user does not provide a mask then only the - non-empty fields present in the request will be - overwritten. Set the update_mask to ``*`` to override - all fields. For the objective config, the user can - either provide the update mask for - model_deployment_monitoring_objective_configs or any - combination of its nested fields, such as: - model_deployment_monitoring_objective_configs.objective_config.training_dataset. 
- - Updatable fields: - - - ``display_name`` - - ``model_deployment_monitoring_schedule_config`` - - ``model_monitoring_alert_config`` - - ``logging_sampling_strategy`` - - ``labels`` - - ``log_ttl`` - - ``enable_monitoring_pipeline_logs`` . and - - ``model_deployment_monitoring_objective_configs`` . - or - - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset`` - - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config`` - - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config`` - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob` Represents a job that runs periodically to monitor the deployed models in an - endpoint. It will analyze the logged training & - prediction data to detect any abnormal behaviors. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([model_deployment_monitoring_job, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.UpdateModelDeploymentMonitoringJobRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.UpdateModelDeploymentMonitoringJobRequest): - request = job_service.UpdateModelDeploymentMonitoringJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if model_deployment_monitoring_job is not None: - request.model_deployment_monitoring_job = model_deployment_monitoring_job - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_model_deployment_monitoring_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("model_deployment_monitoring_job.name", request.model_deployment_monitoring_job.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, - metadata_type=job_service.UpdateModelDeploymentMonitoringJobOperationMetadata, - ) - - # Done; return the response. - return response - - def delete_model_deployment_monitoring_job(self, - request: Union[job_service.DeleteModelDeploymentMonitoringJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a ModelDeploymentMonitoringJob. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteModelDeploymentMonitoringJobRequest, dict]): - The request object. 
Request message for - [JobService.DeleteModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.DeleteModelDeploymentMonitoringJob]. - name (str): - Required. The resource name of the model monitoring job - to delete. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.DeleteModelDeploymentMonitoringJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, job_service.DeleteModelDeploymentMonitoringJobRequest): - request = job_service.DeleteModelDeploymentMonitoringJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_model_deployment_monitoring_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def pause_model_deployment_monitoring_job(self, - request: Union[job_service.PauseModelDeploymentMonitoringJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Pauses a ModelDeploymentMonitoringJob. If the job is running, - the server makes a best effort to cancel the job. Will mark - [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state] - to 'PAUSED'. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.PauseModelDeploymentMonitoringJobRequest, dict]): - The request object. Request message for - [JobService.PauseModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.PauseModelDeploymentMonitoringJob]. - name (str): - Required. The resource name of the - ModelDeploymentMonitoringJob to pause. 
Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.PauseModelDeploymentMonitoringJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.PauseModelDeploymentMonitoringJobRequest): - request = job_service.PauseModelDeploymentMonitoringJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.pause_model_deployment_monitoring_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def resume_model_deployment_monitoring_job(self, - request: Union[job_service.ResumeModelDeploymentMonitoringJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Resumes a paused ModelDeploymentMonitoringJob. It - will start to run from next scheduled time. A deleted - ModelDeploymentMonitoringJob can't be resumed. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ResumeModelDeploymentMonitoringJobRequest, dict]): - The request object. Request message for - [JobService.ResumeModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.ResumeModelDeploymentMonitoringJob]. - name (str): - Required. The resource name of the - ModelDeploymentMonitoringJob to resume. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a job_service.ResumeModelDeploymentMonitoringJobRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, job_service.ResumeModelDeploymentMonitoringJobRequest): - request = job_service.ResumeModelDeploymentMonitoringJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.resume_model_deployment_monitoring_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! 
- """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "JobServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py deleted file mode 100644 index 40e1999b5e..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/pagers.py +++ /dev/null @@ -1,756 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.aiplatform_v1beta1.types import batch_prediction_job -from google.cloud.aiplatform_v1beta1.types import custom_job -from google.cloud.aiplatform_v1beta1.types import data_labeling_job -from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job -from google.cloud.aiplatform_v1beta1.types import job_service -from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job -from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job - - -class ListCustomJobsPager: - """A pager for iterating through ``list_custom_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListCustomJobsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``custom_jobs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListCustomJobs`` requests and continue to iterate - through the ``custom_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListCustomJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., job_service.ListCustomJobsResponse], - request: job_service.ListCustomJobsRequest, - response: job_service.ListCustomJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListCustomJobsResponse): - The initial response object. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = job_service.ListCustomJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[job_service.ListCustomJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[custom_job.CustomJob]: - for page in self.pages: - yield from page.custom_jobs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListCustomJobsAsyncPager: - """A pager for iterating through ``list_custom_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListCustomJobsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``custom_jobs`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListCustomJobs`` requests and continue to iterate - through the ``custom_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListCustomJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[job_service.ListCustomJobsResponse]], - request: job_service.ListCustomJobsRequest, - response: job_service.ListCustomJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. 
- request (google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListCustomJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = job_service.ListCustomJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[job_service.ListCustomJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[custom_job.CustomJob]: - async def async_generator(): - async for page in self.pages: - for response in page.custom_jobs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListDataLabelingJobsPager: - """A pager for iterating through ``list_data_labeling_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``data_labeling_jobs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListDataLabelingJobs`` requests and continue to iterate - through the ``data_labeling_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., job_service.ListDataLabelingJobsResponse], - request: job_service.ListDataLabelingJobsRequest, - response: job_service.ListDataLabelingJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = job_service.ListDataLabelingJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[job_service.ListDataLabelingJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[data_labeling_job.DataLabelingJob]: - for page in self.pages: - yield from page.data_labeling_jobs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListDataLabelingJobsAsyncPager: - """A pager for iterating through ``list_data_labeling_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``data_labeling_jobs`` field. 
- - If there are more pages, the ``__aiter__`` method will make additional - ``ListDataLabelingJobs`` requests and continue to iterate - through the ``data_labeling_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[job_service.ListDataLabelingJobsResponse]], - request: job_service.ListDataLabelingJobsRequest, - response: job_service.ListDataLabelingJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = job_service.ListDataLabelingJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[job_service.ListDataLabelingJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[data_labeling_job.DataLabelingJob]: - async def async_generator(): - async for page in self.pages: - for response in page.data_labeling_jobs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListHyperparameterTuningJobsPager: - """A pager for iterating through ``list_hyperparameter_tuning_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``hyperparameter_tuning_jobs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListHyperparameterTuningJobs`` requests and continue to iterate - through the ``hyperparameter_tuning_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., job_service.ListHyperparameterTuningJobsResponse], - request: job_service.ListHyperparameterTuningJobsRequest, - response: job_service.ListHyperparameterTuningJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = job_service.ListHyperparameterTuningJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[job_service.ListHyperparameterTuningJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[hyperparameter_tuning_job.HyperparameterTuningJob]: - for page in self.pages: - yield from page.hyperparameter_tuning_jobs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListHyperparameterTuningJobsAsyncPager: - """A pager for iterating through ``list_hyperparameter_tuning_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``hyperparameter_tuning_jobs`` field. 
- - If there are more pages, the ``__aiter__`` method will make additional - ``ListHyperparameterTuningJobs`` requests and continue to iterate - through the ``hyperparameter_tuning_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[job_service.ListHyperparameterTuningJobsResponse]], - request: job_service.ListHyperparameterTuningJobsRequest, - response: job_service.ListHyperparameterTuningJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = job_service.ListHyperparameterTuningJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[job_service.ListHyperparameterTuningJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[hyperparameter_tuning_job.HyperparameterTuningJob]: - async def async_generator(): - async for page in self.pages: - for response in page.hyperparameter_tuning_jobs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListBatchPredictionJobsPager: - """A pager for iterating through ``list_batch_prediction_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``batch_prediction_jobs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListBatchPredictionJobs`` requests and continue to iterate - through the ``batch_prediction_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., job_service.ListBatchPredictionJobsResponse], - request: job_service.ListBatchPredictionJobsRequest, - response: job_service.ListBatchPredictionJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = job_service.ListBatchPredictionJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[job_service.ListBatchPredictionJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[batch_prediction_job.BatchPredictionJob]: - for page in self.pages: - yield from page.batch_prediction_jobs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListBatchPredictionJobsAsyncPager: - """A pager for iterating through ``list_batch_prediction_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``batch_prediction_jobs`` field. 
- - If there are more pages, the ``__aiter__`` method will make additional - ``ListBatchPredictionJobs`` requests and continue to iterate - through the ``batch_prediction_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[job_service.ListBatchPredictionJobsResponse]], - request: job_service.ListBatchPredictionJobsRequest, - response: job_service.ListBatchPredictionJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = job_service.ListBatchPredictionJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[job_service.ListBatchPredictionJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[batch_prediction_job.BatchPredictionJob]: - async def async_generator(): - async for page in self.pages: - for response in page.batch_prediction_jobs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class SearchModelDeploymentMonitoringStatsAnomaliesPager: - """A pager for iterating through ``search_model_deployment_monitoring_stats_anomalies`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``monitoring_stats`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``SearchModelDeploymentMonitoringStatsAnomalies`` requests and continue to iterate - through the ``monitoring_stats`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse], - request: job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest, - response: job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies]: - for page in self.pages: - yield from page.monitoring_stats - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager: - """A pager for iterating through ``search_model_deployment_monitoring_stats_anomalies`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``monitoring_stats`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``SearchModelDeploymentMonitoringStatsAnomalies`` requests and continue to iterate - through the ``monitoring_stats`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]], - request: job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest, - response: job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies]: - async def async_generator(): - async for page in self.pages: - for response in page.monitoring_stats: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelDeploymentMonitoringJobsPager: - """A pager for iterating through ``list_model_deployment_monitoring_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``model_deployment_monitoring_jobs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListModelDeploymentMonitoringJobs`` requests and continue to iterate - through the ``model_deployment_monitoring_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., job_service.ListModelDeploymentMonitoringJobsResponse], - request: job_service.ListModelDeploymentMonitoringJobsRequest, - response: job_service.ListModelDeploymentMonitoringJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = job_service.ListModelDeploymentMonitoringJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[job_service.ListModelDeploymentMonitoringJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: - for page in self.pages: - yield from page.model_deployment_monitoring_jobs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelDeploymentMonitoringJobsAsyncPager: - """A pager for iterating through ``list_model_deployment_monitoring_jobs`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``model_deployment_monitoring_jobs`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListModelDeploymentMonitoringJobs`` requests and continue to iterate - through the ``model_deployment_monitoring_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[job_service.ListModelDeploymentMonitoringJobsResponse]], - request: job_service.ListModelDeploymentMonitoringJobsRequest, - response: job_service.ListModelDeploymentMonitoringJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = job_service.ListModelDeploymentMonitoringJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[job_service.ListModelDeploymentMonitoringJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: - async def async_generator(): - async for page in self.pages: - for response in page.model_deployment_monitoring_jobs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/__init__.py deleted file mode 100644 index 13c5f7ade5..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -from typing import Dict, Type - -from .base import JobServiceTransport -from .grpc import JobServiceGrpcTransport -from .grpc_asyncio import JobServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]] -_transport_registry['grpc'] = JobServiceGrpcTransport -_transport_registry['grpc_asyncio'] = JobServiceGrpcAsyncIOTransport - -__all__ = ( - 'JobServiceTransport', - 'JobServiceGrpcTransport', - 'JobServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py deleted file mode 100644 index 222b310ec8..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/base.py +++ /dev/null @@ -1,542 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import batch_prediction_job -from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job -from google.cloud.aiplatform_v1beta1.types import custom_job -from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1beta1.types import data_labeling_job -from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job -from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job -from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job -from google.cloud.aiplatform_v1beta1.types import job_service -from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job -from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class JobServiceTransport(abc.ABC): - """Abstract transport class for JobService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 
'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. 
- if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.create_custom_job: gapic_v1.method.wrap_method( - self.create_custom_job, - default_timeout=5.0, - client_info=client_info, - ), - self.get_custom_job: gapic_v1.method.wrap_method( - self.get_custom_job, - default_timeout=5.0, - client_info=client_info, - ), - self.list_custom_jobs: gapic_v1.method.wrap_method( - self.list_custom_jobs, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_custom_job: gapic_v1.method.wrap_method( - self.delete_custom_job, - default_timeout=5.0, - client_info=client_info, - ), - self.cancel_custom_job: gapic_v1.method.wrap_method( - self.cancel_custom_job, - default_timeout=5.0, - client_info=client_info, - ), - self.create_data_labeling_job: gapic_v1.method.wrap_method( - self.create_data_labeling_job, - default_timeout=5.0, - client_info=client_info, - ), - self.get_data_labeling_job: gapic_v1.method.wrap_method( - self.get_data_labeling_job, - default_timeout=5.0, - client_info=client_info, - ), - self.list_data_labeling_jobs: gapic_v1.method.wrap_method( - self.list_data_labeling_jobs, - 
default_timeout=5.0, - client_info=client_info, - ), - self.delete_data_labeling_job: gapic_v1.method.wrap_method( - self.delete_data_labeling_job, - default_timeout=5.0, - client_info=client_info, - ), - self.cancel_data_labeling_job: gapic_v1.method.wrap_method( - self.cancel_data_labeling_job, - default_timeout=5.0, - client_info=client_info, - ), - self.create_hyperparameter_tuning_job: gapic_v1.method.wrap_method( - self.create_hyperparameter_tuning_job, - default_timeout=5.0, - client_info=client_info, - ), - self.get_hyperparameter_tuning_job: gapic_v1.method.wrap_method( - self.get_hyperparameter_tuning_job, - default_timeout=5.0, - client_info=client_info, - ), - self.list_hyperparameter_tuning_jobs: gapic_v1.method.wrap_method( - self.list_hyperparameter_tuning_jobs, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_hyperparameter_tuning_job: gapic_v1.method.wrap_method( - self.delete_hyperparameter_tuning_job, - default_timeout=5.0, - client_info=client_info, - ), - self.cancel_hyperparameter_tuning_job: gapic_v1.method.wrap_method( - self.cancel_hyperparameter_tuning_job, - default_timeout=5.0, - client_info=client_info, - ), - self.create_batch_prediction_job: gapic_v1.method.wrap_method( - self.create_batch_prediction_job, - default_timeout=5.0, - client_info=client_info, - ), - self.get_batch_prediction_job: gapic_v1.method.wrap_method( - self.get_batch_prediction_job, - default_timeout=5.0, - client_info=client_info, - ), - self.list_batch_prediction_jobs: gapic_v1.method.wrap_method( - self.list_batch_prediction_jobs, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_batch_prediction_job: gapic_v1.method.wrap_method( - self.delete_batch_prediction_job, - default_timeout=5.0, - client_info=client_info, - ), - self.cancel_batch_prediction_job: gapic_v1.method.wrap_method( - self.cancel_batch_prediction_job, - default_timeout=5.0, - client_info=client_info, - ), - self.create_model_deployment_monitoring_job: 
gapic_v1.method.wrap_method( - self.create_model_deployment_monitoring_job, - default_timeout=60.0, - client_info=client_info, - ), - self.search_model_deployment_monitoring_stats_anomalies: gapic_v1.method.wrap_method( - self.search_model_deployment_monitoring_stats_anomalies, - default_timeout=5.0, - client_info=client_info, - ), - self.get_model_deployment_monitoring_job: gapic_v1.method.wrap_method( - self.get_model_deployment_monitoring_job, - default_timeout=5.0, - client_info=client_info, - ), - self.list_model_deployment_monitoring_jobs: gapic_v1.method.wrap_method( - self.list_model_deployment_monitoring_jobs, - default_timeout=5.0, - client_info=client_info, - ), - self.update_model_deployment_monitoring_job: gapic_v1.method.wrap_method( - self.update_model_deployment_monitoring_job, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_model_deployment_monitoring_job: gapic_v1.method.wrap_method( - self.delete_model_deployment_monitoring_job, - default_timeout=5.0, - client_info=client_info, - ), - self.pause_model_deployment_monitoring_job: gapic_v1.method.wrap_method( - self.pause_model_deployment_monitoring_job, - default_timeout=5.0, - client_info=client_info, - ), - self.resume_model_deployment_monitoring_job: gapic_v1.method.wrap_method( - self.resume_model_deployment_monitoring_job, - default_timeout=5.0, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! 
- """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_custom_job(self) -> Callable[ - [job_service.CreateCustomJobRequest], - Union[ - gca_custom_job.CustomJob, - Awaitable[gca_custom_job.CustomJob] - ]]: - raise NotImplementedError() - - @property - def get_custom_job(self) -> Callable[ - [job_service.GetCustomJobRequest], - Union[ - custom_job.CustomJob, - Awaitable[custom_job.CustomJob] - ]]: - raise NotImplementedError() - - @property - def list_custom_jobs(self) -> Callable[ - [job_service.ListCustomJobsRequest], - Union[ - job_service.ListCustomJobsResponse, - Awaitable[job_service.ListCustomJobsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_custom_job(self) -> Callable[ - [job_service.DeleteCustomJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def cancel_custom_job(self) -> Callable[ - [job_service.CancelCustomJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def create_data_labeling_job(self) -> Callable[ - [job_service.CreateDataLabelingJobRequest], - Union[ - gca_data_labeling_job.DataLabelingJob, - Awaitable[gca_data_labeling_job.DataLabelingJob] - ]]: - raise NotImplementedError() - - @property - def get_data_labeling_job(self) -> Callable[ - [job_service.GetDataLabelingJobRequest], - Union[ - data_labeling_job.DataLabelingJob, - Awaitable[data_labeling_job.DataLabelingJob] - ]]: - raise NotImplementedError() - - @property - def list_data_labeling_jobs(self) -> Callable[ - [job_service.ListDataLabelingJobsRequest], - Union[ - job_service.ListDataLabelingJobsResponse, - Awaitable[job_service.ListDataLabelingJobsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_data_labeling_job(self) -> 
Callable[ - [job_service.DeleteDataLabelingJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def cancel_data_labeling_job(self) -> Callable[ - [job_service.CancelDataLabelingJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def create_hyperparameter_tuning_job(self) -> Callable[ - [job_service.CreateHyperparameterTuningJobRequest], - Union[ - gca_hyperparameter_tuning_job.HyperparameterTuningJob, - Awaitable[gca_hyperparameter_tuning_job.HyperparameterTuningJob] - ]]: - raise NotImplementedError() - - @property - def get_hyperparameter_tuning_job(self) -> Callable[ - [job_service.GetHyperparameterTuningJobRequest], - Union[ - hyperparameter_tuning_job.HyperparameterTuningJob, - Awaitable[hyperparameter_tuning_job.HyperparameterTuningJob] - ]]: - raise NotImplementedError() - - @property - def list_hyperparameter_tuning_jobs(self) -> Callable[ - [job_service.ListHyperparameterTuningJobsRequest], - Union[ - job_service.ListHyperparameterTuningJobsResponse, - Awaitable[job_service.ListHyperparameterTuningJobsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_hyperparameter_tuning_job(self) -> Callable[ - [job_service.DeleteHyperparameterTuningJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def cancel_hyperparameter_tuning_job(self) -> Callable[ - [job_service.CancelHyperparameterTuningJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def create_batch_prediction_job(self) -> Callable[ - [job_service.CreateBatchPredictionJobRequest], - Union[ - gca_batch_prediction_job.BatchPredictionJob, - Awaitable[gca_batch_prediction_job.BatchPredictionJob] - ]]: - raise NotImplementedError() - - @property - def 
get_batch_prediction_job(self) -> Callable[ - [job_service.GetBatchPredictionJobRequest], - Union[ - batch_prediction_job.BatchPredictionJob, - Awaitable[batch_prediction_job.BatchPredictionJob] - ]]: - raise NotImplementedError() - - @property - def list_batch_prediction_jobs(self) -> Callable[ - [job_service.ListBatchPredictionJobsRequest], - Union[ - job_service.ListBatchPredictionJobsResponse, - Awaitable[job_service.ListBatchPredictionJobsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_batch_prediction_job(self) -> Callable[ - [job_service.DeleteBatchPredictionJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def cancel_batch_prediction_job(self) -> Callable[ - [job_service.CancelBatchPredictionJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def create_model_deployment_monitoring_job(self) -> Callable[ - [job_service.CreateModelDeploymentMonitoringJobRequest], - Union[ - gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, - Awaitable[gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob] - ]]: - raise NotImplementedError() - - @property - def search_model_deployment_monitoring_stats_anomalies(self) -> Callable[ - [job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest], - Union[ - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse, - Awaitable[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse] - ]]: - raise NotImplementedError() - - @property - def get_model_deployment_monitoring_job(self) -> Callable[ - [job_service.GetModelDeploymentMonitoringJobRequest], - Union[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob, - Awaitable[model_deployment_monitoring_job.ModelDeploymentMonitoringJob] - ]]: - raise NotImplementedError() - - @property - def list_model_deployment_monitoring_jobs(self) -> 
Callable[ - [job_service.ListModelDeploymentMonitoringJobsRequest], - Union[ - job_service.ListModelDeploymentMonitoringJobsResponse, - Awaitable[job_service.ListModelDeploymentMonitoringJobsResponse] - ]]: - raise NotImplementedError() - - @property - def update_model_deployment_monitoring_job(self) -> Callable[ - [job_service.UpdateModelDeploymentMonitoringJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def delete_model_deployment_monitoring_job(self) -> Callable[ - [job_service.DeleteModelDeploymentMonitoringJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def pause_model_deployment_monitoring_job(self) -> Callable[ - [job_service.PauseModelDeploymentMonitoringJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def resume_model_deployment_monitoring_job(self) -> Callable[ - [job_service.ResumeModelDeploymentMonitoringJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'JobServiceTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py deleted file mode 100644 index 0d57300862..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc.py +++ /dev/null @@ -1,1045 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1beta1.types import batch_prediction_job -from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job -from google.cloud.aiplatform_v1beta1.types import custom_job -from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1beta1.types import data_labeling_job -from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job -from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job -from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job -from google.cloud.aiplatform_v1beta1.types import job_service -from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job -from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import JobServiceTransport, DEFAULT_CLIENT_INFO - - -class 
JobServiceGrpcTransport(JobServiceTransport): - """gRPC backend transport for JobService. - - A service for creating and managing Vertex AI's jobs. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
- If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. 
This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. 
- """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_custom_job(self) -> Callable[ - [job_service.CreateCustomJobRequest], - gca_custom_job.CustomJob]: - r"""Return a callable for the create custom job method over gRPC. - - Creates a CustomJob. A created CustomJob right away - will be attempted to be run. - - Returns: - Callable[[~.CreateCustomJobRequest], - ~.CustomJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_custom_job' not in self._stubs: - self._stubs['create_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CreateCustomJob', - request_serializer=job_service.CreateCustomJobRequest.serialize, - response_deserializer=gca_custom_job.CustomJob.deserialize, - ) - return self._stubs['create_custom_job'] - - @property - def get_custom_job(self) -> Callable[ - [job_service.GetCustomJobRequest], - custom_job.CustomJob]: - r"""Return a callable for the get custom job method over gRPC. - - Gets a CustomJob. - - Returns: - Callable[[~.GetCustomJobRequest], - ~.CustomJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_custom_job' not in self._stubs: - self._stubs['get_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/GetCustomJob', - request_serializer=job_service.GetCustomJobRequest.serialize, - response_deserializer=custom_job.CustomJob.deserialize, - ) - return self._stubs['get_custom_job'] - - @property - def list_custom_jobs(self) -> Callable[ - [job_service.ListCustomJobsRequest], - job_service.ListCustomJobsResponse]: - r"""Return a callable for the list custom jobs method over gRPC. - - Lists CustomJobs in a Location. - - Returns: - Callable[[~.ListCustomJobsRequest], - ~.ListCustomJobsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_custom_jobs' not in self._stubs: - self._stubs['list_custom_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/ListCustomJobs', - request_serializer=job_service.ListCustomJobsRequest.serialize, - response_deserializer=job_service.ListCustomJobsResponse.deserialize, - ) - return self._stubs['list_custom_jobs'] - - @property - def delete_custom_job(self) -> Callable[ - [job_service.DeleteCustomJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete custom job method over gRPC. - - Deletes a CustomJob. - - Returns: - Callable[[~.DeleteCustomJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_custom_job' not in self._stubs: - self._stubs['delete_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/DeleteCustomJob', - request_serializer=job_service.DeleteCustomJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_custom_job'] - - @property - def cancel_custom_job(self) -> Callable[ - [job_service.CancelCustomJobRequest], - empty_pb2.Empty]: - r"""Return a callable for the cancel custom job method over gRPC. - - Cancels a CustomJob. Starts asynchronous cancellation on the - CustomJob. The server makes a best effort to cancel the job, but - success is not guaranteed. Clients can use - [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On successful - cancellation, the CustomJob is not deleted; instead it becomes a - job with a - [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] - is set to ``CANCELLED``. - - Returns: - Callable[[~.CancelCustomJobRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'cancel_custom_job' not in self._stubs: - self._stubs['cancel_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CancelCustomJob', - request_serializer=job_service.CancelCustomJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_custom_job'] - - @property - def create_data_labeling_job(self) -> Callable[ - [job_service.CreateDataLabelingJobRequest], - gca_data_labeling_job.DataLabelingJob]: - r"""Return a callable for the create data labeling job method over gRPC. - - Creates a DataLabelingJob. - - Returns: - Callable[[~.CreateDataLabelingJobRequest], - ~.DataLabelingJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_data_labeling_job' not in self._stubs: - self._stubs['create_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CreateDataLabelingJob', - request_serializer=job_service.CreateDataLabelingJobRequest.serialize, - response_deserializer=gca_data_labeling_job.DataLabelingJob.deserialize, - ) - return self._stubs['create_data_labeling_job'] - - @property - def get_data_labeling_job(self) -> Callable[ - [job_service.GetDataLabelingJobRequest], - data_labeling_job.DataLabelingJob]: - r"""Return a callable for the get data labeling job method over gRPC. - - Gets a DataLabelingJob. - - Returns: - Callable[[~.GetDataLabelingJobRequest], - ~.DataLabelingJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_data_labeling_job' not in self._stubs: - self._stubs['get_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/GetDataLabelingJob', - request_serializer=job_service.GetDataLabelingJobRequest.serialize, - response_deserializer=data_labeling_job.DataLabelingJob.deserialize, - ) - return self._stubs['get_data_labeling_job'] - - @property - def list_data_labeling_jobs(self) -> Callable[ - [job_service.ListDataLabelingJobsRequest], - job_service.ListDataLabelingJobsResponse]: - r"""Return a callable for the list data labeling jobs method over gRPC. - - Lists DataLabelingJobs in a Location. - - Returns: - Callable[[~.ListDataLabelingJobsRequest], - ~.ListDataLabelingJobsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_data_labeling_jobs' not in self._stubs: - self._stubs['list_data_labeling_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/ListDataLabelingJobs', - request_serializer=job_service.ListDataLabelingJobsRequest.serialize, - response_deserializer=job_service.ListDataLabelingJobsResponse.deserialize, - ) - return self._stubs['list_data_labeling_jobs'] - - @property - def delete_data_labeling_job(self) -> Callable[ - [job_service.DeleteDataLabelingJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete data labeling job method over gRPC. - - Deletes a DataLabelingJob. - - Returns: - Callable[[~.DeleteDataLabelingJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_data_labeling_job' not in self._stubs: - self._stubs['delete_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/DeleteDataLabelingJob', - request_serializer=job_service.DeleteDataLabelingJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_data_labeling_job'] - - @property - def cancel_data_labeling_job(self) -> Callable[ - [job_service.CancelDataLabelingJobRequest], - empty_pb2.Empty]: - r"""Return a callable for the cancel data labeling job method over gRPC. - - Cancels a DataLabelingJob. Success of cancellation is - not guaranteed. - - Returns: - Callable[[~.CancelDataLabelingJobRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_data_labeling_job' not in self._stubs: - self._stubs['cancel_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CancelDataLabelingJob', - request_serializer=job_service.CancelDataLabelingJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_data_labeling_job'] - - @property - def create_hyperparameter_tuning_job(self) -> Callable[ - [job_service.CreateHyperparameterTuningJobRequest], - gca_hyperparameter_tuning_job.HyperparameterTuningJob]: - r"""Return a callable for the create hyperparameter tuning - job method over gRPC. - - Creates a HyperparameterTuningJob - - Returns: - Callable[[~.CreateHyperparameterTuningJobRequest], - ~.HyperparameterTuningJob]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_hyperparameter_tuning_job' not in self._stubs: - self._stubs['create_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CreateHyperparameterTuningJob', - request_serializer=job_service.CreateHyperparameterTuningJobRequest.serialize, - response_deserializer=gca_hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, - ) - return self._stubs['create_hyperparameter_tuning_job'] - - @property - def get_hyperparameter_tuning_job(self) -> Callable[ - [job_service.GetHyperparameterTuningJobRequest], - hyperparameter_tuning_job.HyperparameterTuningJob]: - r"""Return a callable for the get hyperparameter tuning job method over gRPC. - - Gets a HyperparameterTuningJob - - Returns: - Callable[[~.GetHyperparameterTuningJobRequest], - ~.HyperparameterTuningJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_hyperparameter_tuning_job' not in self._stubs: - self._stubs['get_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/GetHyperparameterTuningJob', - request_serializer=job_service.GetHyperparameterTuningJobRequest.serialize, - response_deserializer=hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, - ) - return self._stubs['get_hyperparameter_tuning_job'] - - @property - def list_hyperparameter_tuning_jobs(self) -> Callable[ - [job_service.ListHyperparameterTuningJobsRequest], - job_service.ListHyperparameterTuningJobsResponse]: - r"""Return a callable for the list hyperparameter tuning - jobs method over gRPC. - - Lists HyperparameterTuningJobs in a Location. - - Returns: - Callable[[~.ListHyperparameterTuningJobsRequest], - ~.ListHyperparameterTuningJobsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_hyperparameter_tuning_jobs' not in self._stubs: - self._stubs['list_hyperparameter_tuning_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/ListHyperparameterTuningJobs', - request_serializer=job_service.ListHyperparameterTuningJobsRequest.serialize, - response_deserializer=job_service.ListHyperparameterTuningJobsResponse.deserialize, - ) - return self._stubs['list_hyperparameter_tuning_jobs'] - - @property - def delete_hyperparameter_tuning_job(self) -> Callable[ - [job_service.DeleteHyperparameterTuningJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete hyperparameter tuning - job method over gRPC. - - Deletes a HyperparameterTuningJob. 
- - Returns: - Callable[[~.DeleteHyperparameterTuningJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_hyperparameter_tuning_job' not in self._stubs: - self._stubs['delete_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/DeleteHyperparameterTuningJob', - request_serializer=job_service.DeleteHyperparameterTuningJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_hyperparameter_tuning_job'] - - @property - def cancel_hyperparameter_tuning_job(self) -> Callable[ - [job_service.CancelHyperparameterTuningJobRequest], - empty_pb2.Empty]: - r"""Return a callable for the cancel hyperparameter tuning - job method over gRPC. - - Cancels a HyperparameterTuningJob. Starts asynchronous - cancellation on the HyperparameterTuningJob. The server makes a - best effort to cancel the job, but success is not guaranteed. - Clients can use - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On successful - cancellation, the HyperparameterTuningJob is not deleted; - instead it becomes a job with a - [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state] - is set to ``CANCELLED``. 
- - Returns: - Callable[[~.CancelHyperparameterTuningJobRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_hyperparameter_tuning_job' not in self._stubs: - self._stubs['cancel_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CancelHyperparameterTuningJob', - request_serializer=job_service.CancelHyperparameterTuningJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_hyperparameter_tuning_job'] - - @property - def create_batch_prediction_job(self) -> Callable[ - [job_service.CreateBatchPredictionJobRequest], - gca_batch_prediction_job.BatchPredictionJob]: - r"""Return a callable for the create batch prediction job method over gRPC. - - Creates a BatchPredictionJob. A BatchPredictionJob - once created will right away be attempted to start. - - Returns: - Callable[[~.CreateBatchPredictionJobRequest], - ~.BatchPredictionJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_batch_prediction_job' not in self._stubs: - self._stubs['create_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CreateBatchPredictionJob', - request_serializer=job_service.CreateBatchPredictionJobRequest.serialize, - response_deserializer=gca_batch_prediction_job.BatchPredictionJob.deserialize, - ) - return self._stubs['create_batch_prediction_job'] - - @property - def get_batch_prediction_job(self) -> Callable[ - [job_service.GetBatchPredictionJobRequest], - batch_prediction_job.BatchPredictionJob]: - r"""Return a callable for the get batch prediction job method over gRPC. - - Gets a BatchPredictionJob - - Returns: - Callable[[~.GetBatchPredictionJobRequest], - ~.BatchPredictionJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_batch_prediction_job' not in self._stubs: - self._stubs['get_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/GetBatchPredictionJob', - request_serializer=job_service.GetBatchPredictionJobRequest.serialize, - response_deserializer=batch_prediction_job.BatchPredictionJob.deserialize, - ) - return self._stubs['get_batch_prediction_job'] - - @property - def list_batch_prediction_jobs(self) -> Callable[ - [job_service.ListBatchPredictionJobsRequest], - job_service.ListBatchPredictionJobsResponse]: - r"""Return a callable for the list batch prediction jobs method over gRPC. - - Lists BatchPredictionJobs in a Location. - - Returns: - Callable[[~.ListBatchPredictionJobsRequest], - ~.ListBatchPredictionJobsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_batch_prediction_jobs' not in self._stubs: - self._stubs['list_batch_prediction_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/ListBatchPredictionJobs', - request_serializer=job_service.ListBatchPredictionJobsRequest.serialize, - response_deserializer=job_service.ListBatchPredictionJobsResponse.deserialize, - ) - return self._stubs['list_batch_prediction_jobs'] - - @property - def delete_batch_prediction_job(self) -> Callable[ - [job_service.DeleteBatchPredictionJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete batch prediction job method over gRPC. - - Deletes a BatchPredictionJob. Can only be called on - jobs that already finished. - - Returns: - Callable[[~.DeleteBatchPredictionJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_batch_prediction_job' not in self._stubs: - self._stubs['delete_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/DeleteBatchPredictionJob', - request_serializer=job_service.DeleteBatchPredictionJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_batch_prediction_job'] - - @property - def cancel_batch_prediction_job(self) -> Callable[ - [job_service.CancelBatchPredictionJobRequest], - empty_pb2.Empty]: - r"""Return a callable for the cancel batch prediction job method over gRPC. - - Cancels a BatchPredictionJob. - - Starts asynchronous cancellation on the BatchPredictionJob. The - server makes the best effort to cancel the job, but success is - not guaranteed. 
Clients can use - [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On a successful - cancellation, the BatchPredictionJob is not deleted;instead its - [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state] - is set to ``CANCELLED``. Any files already outputted by the job - are not deleted. - - Returns: - Callable[[~.CancelBatchPredictionJobRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_batch_prediction_job' not in self._stubs: - self._stubs['cancel_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CancelBatchPredictionJob', - request_serializer=job_service.CancelBatchPredictionJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_batch_prediction_job'] - - @property - def create_model_deployment_monitoring_job(self) -> Callable[ - [job_service.CreateModelDeploymentMonitoringJobRequest], - gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: - r"""Return a callable for the create model deployment - monitoring job method over gRPC. - - Creates a ModelDeploymentMonitoringJob. It will run - periodically on a configured interval. - - Returns: - Callable[[~.CreateModelDeploymentMonitoringJobRequest], - ~.ModelDeploymentMonitoringJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_model_deployment_monitoring_job' not in self._stubs: - self._stubs['create_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CreateModelDeploymentMonitoringJob', - request_serializer=job_service.CreateModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, - ) - return self._stubs['create_model_deployment_monitoring_job'] - - @property - def search_model_deployment_monitoring_stats_anomalies(self) -> Callable[ - [job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest], - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: - r"""Return a callable for the search model deployment - monitoring stats anomalies method over gRPC. - - Searches Model Monitoring Statistics generated within - a given time window. - - Returns: - Callable[[~.SearchModelDeploymentMonitoringStatsAnomaliesRequest], - ~.SearchModelDeploymentMonitoringStatsAnomaliesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'search_model_deployment_monitoring_stats_anomalies' not in self._stubs: - self._stubs['search_model_deployment_monitoring_stats_anomalies'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/SearchModelDeploymentMonitoringStatsAnomalies', - request_serializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest.serialize, - response_deserializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse.deserialize, - ) - return self._stubs['search_model_deployment_monitoring_stats_anomalies'] - - @property - def get_model_deployment_monitoring_job(self) -> Callable[ - [job_service.GetModelDeploymentMonitoringJobRequest], - model_deployment_monitoring_job.ModelDeploymentMonitoringJob]: - r"""Return a callable for the get model deployment - monitoring job method over gRPC. - - Gets a ModelDeploymentMonitoringJob. - - Returns: - Callable[[~.GetModelDeploymentMonitoringJobRequest], - ~.ModelDeploymentMonitoringJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_model_deployment_monitoring_job' not in self._stubs: - self._stubs['get_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/GetModelDeploymentMonitoringJob', - request_serializer=job_service.GetModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, - ) - return self._stubs['get_model_deployment_monitoring_job'] - - @property - def list_model_deployment_monitoring_jobs(self) -> Callable[ - [job_service.ListModelDeploymentMonitoringJobsRequest], - job_service.ListModelDeploymentMonitoringJobsResponse]: - r"""Return a callable for the list model deployment - monitoring jobs method over gRPC. - - Lists ModelDeploymentMonitoringJobs in a Location. - - Returns: - Callable[[~.ListModelDeploymentMonitoringJobsRequest], - ~.ListModelDeploymentMonitoringJobsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_model_deployment_monitoring_jobs' not in self._stubs: - self._stubs['list_model_deployment_monitoring_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/ListModelDeploymentMonitoringJobs', - request_serializer=job_service.ListModelDeploymentMonitoringJobsRequest.serialize, - response_deserializer=job_service.ListModelDeploymentMonitoringJobsResponse.deserialize, - ) - return self._stubs['list_model_deployment_monitoring_jobs'] - - @property - def update_model_deployment_monitoring_job(self) -> Callable[ - [job_service.UpdateModelDeploymentMonitoringJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the update model deployment - monitoring job method over gRPC. - - Updates a ModelDeploymentMonitoringJob. 
- - Returns: - Callable[[~.UpdateModelDeploymentMonitoringJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_model_deployment_monitoring_job' not in self._stubs: - self._stubs['update_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/UpdateModelDeploymentMonitoringJob', - request_serializer=job_service.UpdateModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_model_deployment_monitoring_job'] - - @property - def delete_model_deployment_monitoring_job(self) -> Callable[ - [job_service.DeleteModelDeploymentMonitoringJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete model deployment - monitoring job method over gRPC. - - Deletes a ModelDeploymentMonitoringJob. - - Returns: - Callable[[~.DeleteModelDeploymentMonitoringJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_model_deployment_monitoring_job' not in self._stubs: - self._stubs['delete_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/DeleteModelDeploymentMonitoringJob', - request_serializer=job_service.DeleteModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_model_deployment_monitoring_job'] - - @property - def pause_model_deployment_monitoring_job(self) -> Callable[ - [job_service.PauseModelDeploymentMonitoringJobRequest], - empty_pb2.Empty]: - r"""Return a callable for the pause model deployment - monitoring job method over gRPC. - - Pauses a ModelDeploymentMonitoringJob. If the job is running, - the server makes a best effort to cancel the job. Will mark - [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state] - to 'PAUSED'. - - Returns: - Callable[[~.PauseModelDeploymentMonitoringJobRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'pause_model_deployment_monitoring_job' not in self._stubs: - self._stubs['pause_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/PauseModelDeploymentMonitoringJob', - request_serializer=job_service.PauseModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['pause_model_deployment_monitoring_job'] - - @property - def resume_model_deployment_monitoring_job(self) -> Callable[ - [job_service.ResumeModelDeploymentMonitoringJobRequest], - empty_pb2.Empty]: - r"""Return a callable for the resume model deployment - monitoring job method over gRPC. 
- - Resumes a paused ModelDeploymentMonitoringJob. It - will start to run from next scheduled time. A deleted - ModelDeploymentMonitoringJob can't be resumed. - - Returns: - Callable[[~.ResumeModelDeploymentMonitoringJobRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'resume_model_deployment_monitoring_job' not in self._stubs: - self._stubs['resume_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/ResumeModelDeploymentMonitoringJob', - request_serializer=job_service.ResumeModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['resume_model_deployment_monitoring_job'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'JobServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py deleted file mode 100644 index 2bb2c9b632..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/job_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,1049 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import batch_prediction_job -from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job -from google.cloud.aiplatform_v1beta1.types import custom_job -from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1beta1.types import data_labeling_job -from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job -from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job -from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job -from google.cloud.aiplatform_v1beta1.types import job_service -from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job -from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import JobServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import JobServiceGrpcTransport - - -class JobServiceGrpcAsyncIOTransport(JobServiceTransport): - """gRPC AsyncIO backend transport for JobService. - - A service for creating and managing Vertex AI's jobs. 
- - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. 
- """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
- If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. 
This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_custom_job(self) -> Callable[ - [job_service.CreateCustomJobRequest], - Awaitable[gca_custom_job.CustomJob]]: - r"""Return a callable for the create custom job method over gRPC. - - Creates a CustomJob. A created CustomJob right away - will be attempted to be run. - - Returns: - Callable[[~.CreateCustomJobRequest], - Awaitable[~.CustomJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_custom_job' not in self._stubs: - self._stubs['create_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CreateCustomJob', - request_serializer=job_service.CreateCustomJobRequest.serialize, - response_deserializer=gca_custom_job.CustomJob.deserialize, - ) - return self._stubs['create_custom_job'] - - @property - def get_custom_job(self) -> Callable[ - [job_service.GetCustomJobRequest], - Awaitable[custom_job.CustomJob]]: - r"""Return a callable for the get custom job method over gRPC. - - Gets a CustomJob. - - Returns: - Callable[[~.GetCustomJobRequest], - Awaitable[~.CustomJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_custom_job' not in self._stubs: - self._stubs['get_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/GetCustomJob', - request_serializer=job_service.GetCustomJobRequest.serialize, - response_deserializer=custom_job.CustomJob.deserialize, - ) - return self._stubs['get_custom_job'] - - @property - def list_custom_jobs(self) -> Callable[ - [job_service.ListCustomJobsRequest], - Awaitable[job_service.ListCustomJobsResponse]]: - r"""Return a callable for the list custom jobs method over gRPC. - - Lists CustomJobs in a Location. - - Returns: - Callable[[~.ListCustomJobsRequest], - Awaitable[~.ListCustomJobsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_custom_jobs' not in self._stubs: - self._stubs['list_custom_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/ListCustomJobs', - request_serializer=job_service.ListCustomJobsRequest.serialize, - response_deserializer=job_service.ListCustomJobsResponse.deserialize, - ) - return self._stubs['list_custom_jobs'] - - @property - def delete_custom_job(self) -> Callable[ - [job_service.DeleteCustomJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete custom job method over gRPC. - - Deletes a CustomJob. - - Returns: - Callable[[~.DeleteCustomJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_custom_job' not in self._stubs: - self._stubs['delete_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/DeleteCustomJob', - request_serializer=job_service.DeleteCustomJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_custom_job'] - - @property - def cancel_custom_job(self) -> Callable[ - [job_service.CancelCustomJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the cancel custom job method over gRPC. - - Cancels a CustomJob. Starts asynchronous cancellation on the - CustomJob. The server makes a best effort to cancel the job, but - success is not guaranteed. Clients can use - [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. 
On successful - cancellation, the CustomJob is not deleted; instead it becomes a - job with a - [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] - is set to ``CANCELLED``. - - Returns: - Callable[[~.CancelCustomJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_custom_job' not in self._stubs: - self._stubs['cancel_custom_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CancelCustomJob', - request_serializer=job_service.CancelCustomJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_custom_job'] - - @property - def create_data_labeling_job(self) -> Callable[ - [job_service.CreateDataLabelingJobRequest], - Awaitable[gca_data_labeling_job.DataLabelingJob]]: - r"""Return a callable for the create data labeling job method over gRPC. - - Creates a DataLabelingJob. - - Returns: - Callable[[~.CreateDataLabelingJobRequest], - Awaitable[~.DataLabelingJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_data_labeling_job' not in self._stubs: - self._stubs['create_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CreateDataLabelingJob', - request_serializer=job_service.CreateDataLabelingJobRequest.serialize, - response_deserializer=gca_data_labeling_job.DataLabelingJob.deserialize, - ) - return self._stubs['create_data_labeling_job'] - - @property - def get_data_labeling_job(self) -> Callable[ - [job_service.GetDataLabelingJobRequest], - Awaitable[data_labeling_job.DataLabelingJob]]: - r"""Return a callable for the get data labeling job method over gRPC. - - Gets a DataLabelingJob. - - Returns: - Callable[[~.GetDataLabelingJobRequest], - Awaitable[~.DataLabelingJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_data_labeling_job' not in self._stubs: - self._stubs['get_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/GetDataLabelingJob', - request_serializer=job_service.GetDataLabelingJobRequest.serialize, - response_deserializer=data_labeling_job.DataLabelingJob.deserialize, - ) - return self._stubs['get_data_labeling_job'] - - @property - def list_data_labeling_jobs(self) -> Callable[ - [job_service.ListDataLabelingJobsRequest], - Awaitable[job_service.ListDataLabelingJobsResponse]]: - r"""Return a callable for the list data labeling jobs method over gRPC. - - Lists DataLabelingJobs in a Location. - - Returns: - Callable[[~.ListDataLabelingJobsRequest], - Awaitable[~.ListDataLabelingJobsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_data_labeling_jobs' not in self._stubs: - self._stubs['list_data_labeling_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/ListDataLabelingJobs', - request_serializer=job_service.ListDataLabelingJobsRequest.serialize, - response_deserializer=job_service.ListDataLabelingJobsResponse.deserialize, - ) - return self._stubs['list_data_labeling_jobs'] - - @property - def delete_data_labeling_job(self) -> Callable[ - [job_service.DeleteDataLabelingJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete data labeling job method over gRPC. - - Deletes a DataLabelingJob. - - Returns: - Callable[[~.DeleteDataLabelingJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_data_labeling_job' not in self._stubs: - self._stubs['delete_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/DeleteDataLabelingJob', - request_serializer=job_service.DeleteDataLabelingJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_data_labeling_job'] - - @property - def cancel_data_labeling_job(self) -> Callable[ - [job_service.CancelDataLabelingJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the cancel data labeling job method over gRPC. - - Cancels a DataLabelingJob. Success of cancellation is - not guaranteed. - - Returns: - Callable[[~.CancelDataLabelingJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_data_labeling_job' not in self._stubs: - self._stubs['cancel_data_labeling_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CancelDataLabelingJob', - request_serializer=job_service.CancelDataLabelingJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_data_labeling_job'] - - @property - def create_hyperparameter_tuning_job(self) -> Callable[ - [job_service.CreateHyperparameterTuningJobRequest], - Awaitable[gca_hyperparameter_tuning_job.HyperparameterTuningJob]]: - r"""Return a callable for the create hyperparameter tuning - job method over gRPC. - - Creates a HyperparameterTuningJob - - Returns: - Callable[[~.CreateHyperparameterTuningJobRequest], - Awaitable[~.HyperparameterTuningJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_hyperparameter_tuning_job' not in self._stubs: - self._stubs['create_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CreateHyperparameterTuningJob', - request_serializer=job_service.CreateHyperparameterTuningJobRequest.serialize, - response_deserializer=gca_hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, - ) - return self._stubs['create_hyperparameter_tuning_job'] - - @property - def get_hyperparameter_tuning_job(self) -> Callable[ - [job_service.GetHyperparameterTuningJobRequest], - Awaitable[hyperparameter_tuning_job.HyperparameterTuningJob]]: - r"""Return a callable for the get hyperparameter tuning job method over gRPC. 
- - Gets a HyperparameterTuningJob - - Returns: - Callable[[~.GetHyperparameterTuningJobRequest], - Awaitable[~.HyperparameterTuningJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_hyperparameter_tuning_job' not in self._stubs: - self._stubs['get_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/GetHyperparameterTuningJob', - request_serializer=job_service.GetHyperparameterTuningJobRequest.serialize, - response_deserializer=hyperparameter_tuning_job.HyperparameterTuningJob.deserialize, - ) - return self._stubs['get_hyperparameter_tuning_job'] - - @property - def list_hyperparameter_tuning_jobs(self) -> Callable[ - [job_service.ListHyperparameterTuningJobsRequest], - Awaitable[job_service.ListHyperparameterTuningJobsResponse]]: - r"""Return a callable for the list hyperparameter tuning - jobs method over gRPC. - - Lists HyperparameterTuningJobs in a Location. - - Returns: - Callable[[~.ListHyperparameterTuningJobsRequest], - Awaitable[~.ListHyperparameterTuningJobsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_hyperparameter_tuning_jobs' not in self._stubs: - self._stubs['list_hyperparameter_tuning_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/ListHyperparameterTuningJobs', - request_serializer=job_service.ListHyperparameterTuningJobsRequest.serialize, - response_deserializer=job_service.ListHyperparameterTuningJobsResponse.deserialize, - ) - return self._stubs['list_hyperparameter_tuning_jobs'] - - @property - def delete_hyperparameter_tuning_job(self) -> Callable[ - [job_service.DeleteHyperparameterTuningJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete hyperparameter tuning - job method over gRPC. - - Deletes a HyperparameterTuningJob. - - Returns: - Callable[[~.DeleteHyperparameterTuningJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_hyperparameter_tuning_job' not in self._stubs: - self._stubs['delete_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/DeleteHyperparameterTuningJob', - request_serializer=job_service.DeleteHyperparameterTuningJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_hyperparameter_tuning_job'] - - @property - def cancel_hyperparameter_tuning_job(self) -> Callable[ - [job_service.CancelHyperparameterTuningJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the cancel hyperparameter tuning - job method over gRPC. - - Cancels a HyperparameterTuningJob. Starts asynchronous - cancellation on the HyperparameterTuningJob. The server makes a - best effort to cancel the job, but success is not guaranteed. 
- Clients can use - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On successful - cancellation, the HyperparameterTuningJob is not deleted; - instead it becomes a job with a - [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state] - is set to ``CANCELLED``. - - Returns: - Callable[[~.CancelHyperparameterTuningJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_hyperparameter_tuning_job' not in self._stubs: - self._stubs['cancel_hyperparameter_tuning_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CancelHyperparameterTuningJob', - request_serializer=job_service.CancelHyperparameterTuningJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_hyperparameter_tuning_job'] - - @property - def create_batch_prediction_job(self) -> Callable[ - [job_service.CreateBatchPredictionJobRequest], - Awaitable[gca_batch_prediction_job.BatchPredictionJob]]: - r"""Return a callable for the create batch prediction job method over gRPC. - - Creates a BatchPredictionJob. A BatchPredictionJob - once created will right away be attempted to start. 
- - Returns: - Callable[[~.CreateBatchPredictionJobRequest], - Awaitable[~.BatchPredictionJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_batch_prediction_job' not in self._stubs: - self._stubs['create_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CreateBatchPredictionJob', - request_serializer=job_service.CreateBatchPredictionJobRequest.serialize, - response_deserializer=gca_batch_prediction_job.BatchPredictionJob.deserialize, - ) - return self._stubs['create_batch_prediction_job'] - - @property - def get_batch_prediction_job(self) -> Callable[ - [job_service.GetBatchPredictionJobRequest], - Awaitable[batch_prediction_job.BatchPredictionJob]]: - r"""Return a callable for the get batch prediction job method over gRPC. - - Gets a BatchPredictionJob - - Returns: - Callable[[~.GetBatchPredictionJobRequest], - Awaitable[~.BatchPredictionJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_batch_prediction_job' not in self._stubs: - self._stubs['get_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/GetBatchPredictionJob', - request_serializer=job_service.GetBatchPredictionJobRequest.serialize, - response_deserializer=batch_prediction_job.BatchPredictionJob.deserialize, - ) - return self._stubs['get_batch_prediction_job'] - - @property - def list_batch_prediction_jobs(self) -> Callable[ - [job_service.ListBatchPredictionJobsRequest], - Awaitable[job_service.ListBatchPredictionJobsResponse]]: - r"""Return a callable for the list batch prediction jobs method over gRPC. - - Lists BatchPredictionJobs in a Location. - - Returns: - Callable[[~.ListBatchPredictionJobsRequest], - Awaitable[~.ListBatchPredictionJobsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_batch_prediction_jobs' not in self._stubs: - self._stubs['list_batch_prediction_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/ListBatchPredictionJobs', - request_serializer=job_service.ListBatchPredictionJobsRequest.serialize, - response_deserializer=job_service.ListBatchPredictionJobsResponse.deserialize, - ) - return self._stubs['list_batch_prediction_jobs'] - - @property - def delete_batch_prediction_job(self) -> Callable[ - [job_service.DeleteBatchPredictionJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete batch prediction job method over gRPC. - - Deletes a BatchPredictionJob. Can only be called on - jobs that already finished. - - Returns: - Callable[[~.DeleteBatchPredictionJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_batch_prediction_job' not in self._stubs: - self._stubs['delete_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/DeleteBatchPredictionJob', - request_serializer=job_service.DeleteBatchPredictionJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_batch_prediction_job'] - - @property - def cancel_batch_prediction_job(self) -> Callable[ - [job_service.CancelBatchPredictionJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the cancel batch prediction job method over gRPC. - - Cancels a BatchPredictionJob. - - Starts asynchronous cancellation on the BatchPredictionJob. The - server makes the best effort to cancel the job, but success is - not guaranteed. Clients can use - [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob] - or other methods to check whether the cancellation succeeded or - whether the job completed despite cancellation. On a successful - cancellation, the BatchPredictionJob is not deleted;instead its - [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state] - is set to ``CANCELLED``. Any files already outputted by the job - are not deleted. - - Returns: - Callable[[~.CancelBatchPredictionJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'cancel_batch_prediction_job' not in self._stubs: - self._stubs['cancel_batch_prediction_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CancelBatchPredictionJob', - request_serializer=job_service.CancelBatchPredictionJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_batch_prediction_job'] - - @property - def create_model_deployment_monitoring_job(self) -> Callable[ - [job_service.CreateModelDeploymentMonitoringJobRequest], - Awaitable[gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob]]: - r"""Return a callable for the create model deployment - monitoring job method over gRPC. - - Creates a ModelDeploymentMonitoringJob. It will run - periodically on a configured interval. - - Returns: - Callable[[~.CreateModelDeploymentMonitoringJobRequest], - Awaitable[~.ModelDeploymentMonitoringJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_model_deployment_monitoring_job' not in self._stubs: - self._stubs['create_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/CreateModelDeploymentMonitoringJob', - request_serializer=job_service.CreateModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, - ) - return self._stubs['create_model_deployment_monitoring_job'] - - @property - def search_model_deployment_monitoring_stats_anomalies(self) -> Callable[ - [job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest], - Awaitable[job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse]]: - r"""Return a callable for the search model deployment - monitoring stats anomalies method over gRPC. - - Searches Model Monitoring Statistics generated within - a given time window. - - Returns: - Callable[[~.SearchModelDeploymentMonitoringStatsAnomaliesRequest], - Awaitable[~.SearchModelDeploymentMonitoringStatsAnomaliesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'search_model_deployment_monitoring_stats_anomalies' not in self._stubs: - self._stubs['search_model_deployment_monitoring_stats_anomalies'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/SearchModelDeploymentMonitoringStatsAnomalies', - request_serializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest.serialize, - response_deserializer=job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse.deserialize, - ) - return self._stubs['search_model_deployment_monitoring_stats_anomalies'] - - @property - def get_model_deployment_monitoring_job(self) -> Callable[ - [job_service.GetModelDeploymentMonitoringJobRequest], - Awaitable[model_deployment_monitoring_job.ModelDeploymentMonitoringJob]]: - r"""Return a callable for the get model deployment - monitoring job method over gRPC. - - Gets a ModelDeploymentMonitoringJob. - - Returns: - Callable[[~.GetModelDeploymentMonitoringJobRequest], - Awaitable[~.ModelDeploymentMonitoringJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_model_deployment_monitoring_job' not in self._stubs: - self._stubs['get_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/GetModelDeploymentMonitoringJob', - request_serializer=job_service.GetModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.deserialize, - ) - return self._stubs['get_model_deployment_monitoring_job'] - - @property - def list_model_deployment_monitoring_jobs(self) -> Callable[ - [job_service.ListModelDeploymentMonitoringJobsRequest], - Awaitable[job_service.ListModelDeploymentMonitoringJobsResponse]]: - r"""Return a callable for the list model deployment - monitoring jobs method over gRPC. - - Lists ModelDeploymentMonitoringJobs in a Location. - - Returns: - Callable[[~.ListModelDeploymentMonitoringJobsRequest], - Awaitable[~.ListModelDeploymentMonitoringJobsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_model_deployment_monitoring_jobs' not in self._stubs: - self._stubs['list_model_deployment_monitoring_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/ListModelDeploymentMonitoringJobs', - request_serializer=job_service.ListModelDeploymentMonitoringJobsRequest.serialize, - response_deserializer=job_service.ListModelDeploymentMonitoringJobsResponse.deserialize, - ) - return self._stubs['list_model_deployment_monitoring_jobs'] - - @property - def update_model_deployment_monitoring_job(self) -> Callable[ - [job_service.UpdateModelDeploymentMonitoringJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the update model deployment - monitoring job method over gRPC. 
- - Updates a ModelDeploymentMonitoringJob. - - Returns: - Callable[[~.UpdateModelDeploymentMonitoringJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_model_deployment_monitoring_job' not in self._stubs: - self._stubs['update_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/UpdateModelDeploymentMonitoringJob', - request_serializer=job_service.UpdateModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_model_deployment_monitoring_job'] - - @property - def delete_model_deployment_monitoring_job(self) -> Callable[ - [job_service.DeleteModelDeploymentMonitoringJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete model deployment - monitoring job method over gRPC. - - Deletes a ModelDeploymentMonitoringJob. - - Returns: - Callable[[~.DeleteModelDeploymentMonitoringJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_model_deployment_monitoring_job' not in self._stubs: - self._stubs['delete_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/DeleteModelDeploymentMonitoringJob', - request_serializer=job_service.DeleteModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_model_deployment_monitoring_job'] - - @property - def pause_model_deployment_monitoring_job(self) -> Callable[ - [job_service.PauseModelDeploymentMonitoringJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the pause model deployment - monitoring job method over gRPC. - - Pauses a ModelDeploymentMonitoringJob. If the job is running, - the server makes a best effort to cancel the job. Will mark - [ModelDeploymentMonitoringJob.state][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.state] - to 'PAUSED'. - - Returns: - Callable[[~.PauseModelDeploymentMonitoringJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'pause_model_deployment_monitoring_job' not in self._stubs: - self._stubs['pause_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/PauseModelDeploymentMonitoringJob', - request_serializer=job_service.PauseModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['pause_model_deployment_monitoring_job'] - - @property - def resume_model_deployment_monitoring_job(self) -> Callable[ - [job_service.ResumeModelDeploymentMonitoringJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the resume model deployment - monitoring job method over gRPC. - - Resumes a paused ModelDeploymentMonitoringJob. It - will start to run from next scheduled time. A deleted - ModelDeploymentMonitoringJob can't be resumed. - - Returns: - Callable[[~.ResumeModelDeploymentMonitoringJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'resume_model_deployment_monitoring_job' not in self._stubs: - self._stubs['resume_model_deployment_monitoring_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.JobService/ResumeModelDeploymentMonitoringJob', - request_serializer=job_service.ResumeModelDeploymentMonitoringJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['resume_model_deployment_monitoring_job'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'JobServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/__init__.py deleted file mode 100644 index b0a31fc612..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import MetadataServiceClient -from .async_client import MetadataServiceAsyncClient - -__all__ = ( - 'MetadataServiceClient', - 'MetadataServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py deleted file mode 100644 index 867eb1a2ff..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/async_client.py +++ /dev/null @@ -1,2980 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.metadata_service import pagers -from google.cloud.aiplatform_v1beta1.types import artifact -from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact -from google.cloud.aiplatform_v1beta1.types import context -from google.cloud.aiplatform_v1beta1.types import context as gca_context -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import event -from google.cloud.aiplatform_v1beta1.types import execution -from google.cloud.aiplatform_v1beta1.types import execution as gca_execution -from google.cloud.aiplatform_v1beta1.types import lineage_subgraph -from google.cloud.aiplatform_v1beta1.types import metadata_schema -from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema -from google.cloud.aiplatform_v1beta1.types import metadata_service -from google.cloud.aiplatform_v1beta1.types import metadata_store -from google.cloud.aiplatform_v1beta1.types import metadata_store as gca_metadata_store -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from 
google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import MetadataServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import MetadataServiceGrpcAsyncIOTransport -from .client import MetadataServiceClient - - -class MetadataServiceAsyncClient: - """Service for reading and writing metadata entries.""" - - _client: MetadataServiceClient - - DEFAULT_ENDPOINT = MetadataServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = MetadataServiceClient.DEFAULT_MTLS_ENDPOINT - - artifact_path = staticmethod(MetadataServiceClient.artifact_path) - parse_artifact_path = staticmethod(MetadataServiceClient.parse_artifact_path) - context_path = staticmethod(MetadataServiceClient.context_path) - parse_context_path = staticmethod(MetadataServiceClient.parse_context_path) - execution_path = staticmethod(MetadataServiceClient.execution_path) - parse_execution_path = staticmethod(MetadataServiceClient.parse_execution_path) - metadata_schema_path = staticmethod(MetadataServiceClient.metadata_schema_path) - parse_metadata_schema_path = staticmethod(MetadataServiceClient.parse_metadata_schema_path) - metadata_store_path = staticmethod(MetadataServiceClient.metadata_store_path) - parse_metadata_store_path = staticmethod(MetadataServiceClient.parse_metadata_store_path) - common_billing_account_path = staticmethod(MetadataServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(MetadataServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(MetadataServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(MetadataServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(MetadataServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(MetadataServiceClient.parse_common_organization_path) - 
common_project_path = staticmethod(MetadataServiceClient.common_project_path) - parse_common_project_path = staticmethod(MetadataServiceClient.parse_common_project_path) - common_location_path = staticmethod(MetadataServiceClient.common_location_path) - parse_common_location_path = staticmethod(MetadataServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - MetadataServiceAsyncClient: The constructed client. - """ - return MetadataServiceClient.from_service_account_info.__func__(MetadataServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - MetadataServiceAsyncClient: The constructed client. - """ - return MetadataServiceClient.from_service_account_file.__func__(MetadataServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> MetadataServiceTransport: - """Returns the transport used by the client instance. - - Returns: - MetadataServiceTransport: The transport used by the client instance. 
- """ - return self._client.transport - - get_transport_class = functools.partial(type(MetadataServiceClient).get_transport_class, type(MetadataServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, MetadataServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the metadata service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.MetadataServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. 
- - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = MetadataServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_metadata_store(self, - request: Union[metadata_service.CreateMetadataStoreRequest, dict] = None, - *, - parent: str = None, - metadata_store: gca_metadata_store.MetadataStore = None, - metadata_store_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Initializes a MetadataStore, including allocation of - resources. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateMetadataStoreRequest, dict]): - The request object. Request message for - [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore]. - parent (:class:`str`): - Required. The resource name of the Location where the - MetadataStore should be created. Format: - ``projects/{project}/locations/{location}/`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - metadata_store (:class:`google.cloud.aiplatform_v1beta1.types.MetadataStore`): - Required. The MetadataStore to - create. - - This corresponds to the ``metadata_store`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - metadata_store_id (:class:`str`): - The {metadatastore} portion of the resource name with - the format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - If not provided, the MetadataStore's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are ``/[a-z][0-9]-/``. Must be - unique across all MetadataStores in the parent Location. 
- (Otherwise the request will fail with ALREADY_EXISTS, or - PERMISSION_DENIED if the caller can't view the - preexisting MetadataStore.) - - This corresponds to the ``metadata_store_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.MetadataStore` Instance of a metadata store. Contains a set of metadata that can be - queried. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, metadata_store, metadata_store_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.CreateMetadataStoreRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if metadata_store is not None: - request.metadata_store = metadata_store - if metadata_store_id is not None: - request.metadata_store_id = metadata_store_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_metadata_store, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_metadata_store.MetadataStore, - metadata_type=metadata_service.CreateMetadataStoreOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_metadata_store(self, - request: Union[metadata_service.GetMetadataStoreRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_store.MetadataStore: - r"""Retrieves a specific MetadataStore. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetMetadataStoreRequest, dict]): - The request object. Request message for - [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataStore]. - name (:class:`str`): - Required. The resource name of the MetadataStore to - retrieve. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.MetadataStore: - Instance of a metadata store. - Contains a set of metadata that can be - queried. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.GetMetadataStoreRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_metadata_store, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_metadata_stores(self, - request: Union[metadata_service.ListMetadataStoresRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListMetadataStoresAsyncPager: - r"""Lists MetadataStores for a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest, dict]): - The request object. Request message for - [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. - parent (:class:`str`): - Required. The Location whose MetadataStores should be - listed. 
Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataStoresAsyncPager: - Response message for - [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.ListMetadataStoresRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_metadata_stores, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListMetadataStoresAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_metadata_store(self, - request: Union[metadata_service.DeleteMetadataStoreRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a single MetadataStore and all its child - resources (Artifacts, Executions, and Contexts). - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteMetadataStoreRequest, dict]): - The request object. Request message for - [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore]. - name (:class:`str`): - Required. The resource name of the MetadataStore to - delete. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. 
For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.DeleteMetadataStoreRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_metadata_store, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=metadata_service.DeleteMetadataStoreOperationMetadata, - ) - - # Done; return the response. 
- return response - - async def create_artifact(self, - request: Union[metadata_service.CreateArtifactRequest, dict] = None, - *, - parent: str = None, - artifact: gca_artifact.Artifact = None, - artifact_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_artifact.Artifact: - r"""Creates an Artifact associated with a MetadataStore. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateArtifactRequest, dict]): - The request object. Request message for - [MetadataService.CreateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.CreateArtifact]. - parent (:class:`str`): - Required. The resource name of the MetadataStore where - the Artifact should be created. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - artifact (:class:`google.cloud.aiplatform_v1beta1.types.Artifact`): - Required. The Artifact to create. - This corresponds to the ``artifact`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - artifact_id (:class:`str`): - The {artifact} portion of the resource name with the - format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - If not provided, the Artifact's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are ``/[a-z][0-9]-/``. Must be - unique across all Artifacts in the parent MetadataStore. - (Otherwise the request will fail with ALREADY_EXISTS, or - PERMISSION_DENIED if the caller can't view the - preexisting Artifact.) - - This corresponds to the ``artifact_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Artifact: - Instance of a general artifact. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, artifact, artifact_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.CreateArtifactRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if artifact is not None: - request.artifact = artifact - if artifact_id is not None: - request.artifact_id = artifact_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_artifact, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def get_artifact(self, - request: Union[metadata_service.GetArtifactRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> artifact.Artifact: - r"""Retrieves a specific Artifact. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetArtifactRequest, dict]): - The request object. Request message for - [MetadataService.GetArtifact][google.cloud.aiplatform.v1beta1.MetadataService.GetArtifact]. - name (:class:`str`): - Required. The resource name of the Artifact to retrieve. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Artifact: - Instance of a general artifact. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.GetArtifactRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_artifact, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_artifacts(self, - request: Union[metadata_service.ListArtifactsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListArtifactsAsyncPager: - r"""Lists Artifacts in the MetadataStore. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest, dict]): - The request object. Request message for - [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. - parent (:class:`str`): - Required. The MetadataStore whose Artifacts should be - listed. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListArtifactsAsyncPager: - Response message for - [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. - - Iterating over this object will yield results and - resolve additional pages automatically. 
- - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.ListArtifactsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_artifacts, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListArtifactsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_artifact(self, - request: Union[metadata_service.UpdateArtifactRequest, dict] = None, - *, - artifact: gca_artifact.Artifact = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_artifact.Artifact: - r"""Updates a stored Artifact. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateArtifactRequest, dict]): - The request object. 
Request message for - [MetadataService.UpdateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.UpdateArtifact]. - artifact (:class:`google.cloud.aiplatform_v1beta1.types.Artifact`): - Required. The Artifact containing updates. The - Artifact's - [Artifact.name][google.cloud.aiplatform.v1beta1.Artifact.name] - field is used to identify the Artifact to be updated. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - - This corresponds to the ``artifact`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. A FieldMask indicating - which fields should be updated. - Functionality of this field is not yet - supported. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Artifact: - Instance of a general artifact. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([artifact, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.UpdateArtifactRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if artifact is not None: - request.artifact = artifact - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_artifact, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("artifact.name", request.artifact.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_artifact(self, - request: Union[metadata_service.DeleteArtifactRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes an Artifact. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteArtifactRequest, dict]): - The request object. Request message for - [MetadataService.DeleteArtifact][google.cloud.aiplatform.v1beta1.MetadataService.DeleteArtifact]. - name (:class:`str`): - Required. The resource name of the Artifact to delete. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. 
- - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.DeleteArtifactRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_artifact, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
- return response - - async def purge_artifacts(self, - request: Union[metadata_service.PurgeArtifactsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Purges Artifacts. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.PurgeArtifactsRequest, dict]): - The request object. Request message for - [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts]. - parent (:class:`str`): - Required. The metadata store to purge Artifacts from. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.PurgeArtifactsResponse` - Response message for - [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.PurgeArtifactsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.purge_artifacts, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - metadata_service.PurgeArtifactsResponse, - metadata_type=metadata_service.PurgeArtifactsMetadata, - ) - - # Done; return the response. - return response - - async def create_context(self, - request: Union[metadata_service.CreateContextRequest, dict] = None, - *, - parent: str = None, - context: gca_context.Context = None, - context_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_context.Context: - r"""Creates a Context associated with a MetadataStore. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateContextRequest, dict]): - The request object. Request message for - [MetadataService.CreateContext][google.cloud.aiplatform.v1beta1.MetadataService.CreateContext]. - parent (:class:`str`): - Required. 
The resource name of the MetadataStore where - the Context should be created. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - context (:class:`google.cloud.aiplatform_v1beta1.types.Context`): - Required. The Context to create. - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - context_id (:class:`str`): - The {context} portion of the resource name with the - format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}``. - If not provided, the Context's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are ``/[a-z][0-9]-/``. Must be - unique across all Contexts in the parent MetadataStore. - (Otherwise the request will fail with ALREADY_EXISTS, or - PERMISSION_DENIED if the caller can't view the - preexisting Context.) - - This corresponds to the ``context_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Context: - Instance of a general context. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, context, context_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.CreateContextRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if context is not None: - request.context = context - if context_id is not None: - request.context_id = context_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_context, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_context(self, - request: Union[metadata_service.GetContextRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> context.Context: - r"""Retrieves a specific Context. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetContextRequest, dict]): - The request object. Request message for - [MetadataService.GetContext][google.cloud.aiplatform.v1beta1.MetadataService.GetContext]. - name (:class:`str`): - Required. The resource name of the Context to retrieve. 
- Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Context: - Instance of a general context. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.GetContextRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_context, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def list_contexts(self, - request: Union[metadata_service.ListContextsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListContextsAsyncPager: - r"""Lists Contexts on the MetadataStore. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListContextsRequest, dict]): - The request object. Request message for - [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts] - parent (:class:`str`): - Required. The MetadataStore whose Contexts should be - listed. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListContextsAsyncPager: - Response message for - [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.ListContextsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_contexts, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListContextsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_context(self, - request: Union[metadata_service.UpdateContextRequest, dict] = None, - *, - context: gca_context.Context = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_context.Context: - r"""Updates a stored Context. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateContextRequest, dict]): - The request object. Request message for - [MetadataService.UpdateContext][google.cloud.aiplatform.v1beta1.MetadataService.UpdateContext]. - context (:class:`google.cloud.aiplatform_v1beta1.types.Context`): - Required. The Context containing updates. The Context's - [Context.name][google.cloud.aiplatform.v1beta1.Context.name] - field is used to identify the Context to be updated. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. A FieldMask indicating - which fields should be updated. - Functionality of this field is not yet - supported. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Context: - Instance of a general context. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([context, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.UpdateContextRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if context is not None: - request.context = context - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_context, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("context.name", request.context.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def delete_context(self, - request: Union[metadata_service.DeleteContextRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a stored Context. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteContextRequest, dict]): - The request object. Request message for - [MetadataService.DeleteContext][google.cloud.aiplatform.v1beta1.MetadataService.DeleteContext]. - name (:class:`str`): - Required. The resource name of the Context to delete. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.DeleteContextRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_context, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def purge_contexts(self, - request: Union[metadata_service.PurgeContextsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Purges Contexts. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.PurgeContextsRequest, dict]): - The request object. Request message for - [MetadataService.PurgeContexts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts]. - parent (:class:`str`): - Required. The metadata store to purge Contexts from. 
- Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.PurgeContextsResponse` - Response message for - [MetadataService.PurgeContexts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.PurgeContextsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.purge_contexts, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - metadata_service.PurgeContextsResponse, - metadata_type=metadata_service.PurgeContextsMetadata, - ) - - # Done; return the response. - return response - - async def add_context_artifacts_and_executions(self, - request: Union[metadata_service.AddContextArtifactsAndExecutionsRequest, dict] = None, - *, - context: str = None, - artifacts: Sequence[str] = None, - executions: Sequence[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_service.AddContextArtifactsAndExecutionsResponse: - r"""Adds a set of Artifacts and Executions to a Context. - If any of the Artifacts or Executions have already been - added to a Context, they are simply skipped. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsRequest, dict]): - The request object. Request message for - [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. - context (:class:`str`): - Required. The resource name of the Context that the - Artifacts and Executions belong to. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - artifacts (:class:`Sequence[str]`): - The resource names of the Artifacts to attribute to the - Context. - - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - - This corresponds to the ``artifacts`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- executions (:class:`Sequence[str]`): - The resource names of the Executions to associate with - the Context. - - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - - This corresponds to the ``executions`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsResponse: - Response message for - [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([context, artifacts, executions]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.AddContextArtifactsAndExecutionsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if context is not None: - request.context = context - if artifacts: - request.artifacts.extend(artifacts) - if executions: - request.executions.extend(executions) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.add_context_artifacts_and_executions, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("context", request.context), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def add_context_children(self, - request: Union[metadata_service.AddContextChildrenRequest, dict] = None, - *, - context: str = None, - child_contexts: Sequence[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_service.AddContextChildrenResponse: - r"""Adds a set of Contexts as children to a parent Context. If any - of the child Contexts have already been added to the parent - Context, they are simply skipped. If this call would create a - cycle or cause any Context to have more than 10 parents, the - request will fail with an INVALID_ARGUMENT error. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.AddContextChildrenRequest, dict]): - The request object. Request message for - [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. - context (:class:`str`): - Required. The resource name of the parent Context. - - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - child_contexts (:class:`Sequence[str]`): - The resource names of the child - Contexts. - - This corresponds to the ``child_contexts`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.AddContextChildrenResponse: - Response message for - [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([context, child_contexts]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.AddContextChildrenRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if context is not None: - request.context = context - if child_contexts: - request.child_contexts.extend(child_contexts) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.add_context_children, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("context", request.context), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def query_context_lineage_subgraph(self, - request: Union[metadata_service.QueryContextLineageSubgraphRequest, dict] = None, - *, - context: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> lineage_subgraph.LineageSubgraph: - r"""Retrieves Artifacts and Executions within the - specified Context, connected by Event edges and returned - as a LineageSubgraph. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.QueryContextLineageSubgraphRequest, dict]): - The request object. Request message for - [MetadataService.QueryContextLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryContextLineageSubgraph]. - context (:class:`str`): - Required. The resource name of the Context whose - Artifacts and Executions should be retrieved as a - LineageSubgraph. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - - The request may error with FAILED_PRECONDITION if the - number of Artifacts, the number of Executions, or the - number of Events that would be returned for the Context - exceeds 1000. - - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.LineageSubgraph: - A subgraph of the overall lineage - graph. Event edges connect Artifact and - Execution nodes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([context]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.QueryContextLineageSubgraphRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if context is not None: - request.context = context - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.query_context_lineage_subgraph, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("context", request.context), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def create_execution(self, - request: Union[metadata_service.CreateExecutionRequest, dict] = None, - *, - parent: str = None, - execution: gca_execution.Execution = None, - execution_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_execution.Execution: - r"""Creates an Execution associated with a MetadataStore. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateExecutionRequest, dict]): - The request object. Request message for - [MetadataService.CreateExecution][google.cloud.aiplatform.v1beta1.MetadataService.CreateExecution]. - parent (:class:`str`): - Required. The resource name of the MetadataStore where - the Execution should be created. 
Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - execution (:class:`google.cloud.aiplatform_v1beta1.types.Execution`): - Required. The Execution to create. - This corresponds to the ``execution`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - execution_id (:class:`str`): - The {execution} portion of the resource name with the - format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - If not provided, the Execution's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are ``/[a-z][0-9]-/``. Must be - unique across all Executions in the parent - MetadataStore. (Otherwise the request will fail with - ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't - view the preexisting Execution.) - - This corresponds to the ``execution_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Execution: - Instance of a general execution. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, execution, execution_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.CreateExecutionRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if execution is not None: - request.execution = execution - if execution_id is not None: - request.execution_id = execution_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_execution, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_execution(self, - request: Union[metadata_service.GetExecutionRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> execution.Execution: - r"""Retrieves a specific Execution. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetExecutionRequest, dict]): - The request object. Request message for - [MetadataService.GetExecution][google.cloud.aiplatform.v1beta1.MetadataService.GetExecution]. - name (:class:`str`): - Required. The resource name of the Execution to - retrieve. 
Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Execution: - Instance of a general execution. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.GetExecutionRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_execution, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def list_executions(self, - request: Union[metadata_service.ListExecutionsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListExecutionsAsyncPager: - r"""Lists Executions in the MetadataStore. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest, dict]): - The request object. Request message for - [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. - parent (:class:`str`): - Required. The MetadataStore whose Executions should be - listed. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListExecutionsAsyncPager: - Response message for - [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.ListExecutionsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_executions, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListExecutionsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_execution(self, - request: Union[metadata_service.UpdateExecutionRequest, dict] = None, - *, - execution: gca_execution.Execution = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_execution.Execution: - r"""Updates a stored Execution. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateExecutionRequest, dict]): - The request object. Request message for - [MetadataService.UpdateExecution][google.cloud.aiplatform.v1beta1.MetadataService.UpdateExecution]. - execution (:class:`google.cloud.aiplatform_v1beta1.types.Execution`): - Required. 
The Execution containing updates. The - Execution's - [Execution.name][google.cloud.aiplatform.v1beta1.Execution.name] - field is used to identify the Execution to be updated. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - - This corresponds to the ``execution`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. A FieldMask indicating - which fields should be updated. - Functionality of this field is not yet - supported. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Execution: - Instance of a general execution. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([execution, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.UpdateExecutionRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if execution is not None: - request.execution = execution - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_execution, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("execution.name", request.execution.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_execution(self, - request: Union[metadata_service.DeleteExecutionRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes an Execution. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteExecutionRequest, dict]): - The request object. Request message for - [MetadataService.DeleteExecution][google.cloud.aiplatform.v1beta1.MetadataService.DeleteExecution]. - name (:class:`str`): - Required. The resource name of the Execution to delete. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. 
A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.DeleteExecutionRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_execution, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
- return response - - async def purge_executions(self, - request: Union[metadata_service.PurgeExecutionsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Purges Executions. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.PurgeExecutionsRequest, dict]): - The request object. Request message for - [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions]. - parent (:class:`str`): - Required. The metadata store to purge Executions from. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.PurgeExecutionsResponse` - Response message for - [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.PurgeExecutionsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.purge_executions, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - metadata_service.PurgeExecutionsResponse, - metadata_type=metadata_service.PurgeExecutionsMetadata, - ) - - # Done; return the response. - return response - - async def add_execution_events(self, - request: Union[metadata_service.AddExecutionEventsRequest, dict] = None, - *, - execution: str = None, - events: Sequence[event.Event] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_service.AddExecutionEventsResponse: - r"""Adds Events to the specified Execution. An Event - indicates whether an Artifact was used as an input or - output for an Execution. If an Event already exists - between the Execution and the Artifact, the Event is - skipped. 
- - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.AddExecutionEventsRequest, dict]): - The request object. Request message for - [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. - execution (:class:`str`): - Required. The resource name of the Execution that the - Events connect Artifacts with. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - - This corresponds to the ``execution`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - events (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.Event]`): - The Events to create and add. - This corresponds to the ``events`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.AddExecutionEventsResponse: - Response message for - [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([execution, events]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.AddExecutionEventsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if execution is not None: - request.execution = execution - if events: - request.events.extend(events) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.add_execution_events, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("execution", request.execution), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def query_execution_inputs_and_outputs(self, - request: Union[metadata_service.QueryExecutionInputsAndOutputsRequest, dict] = None, - *, - execution: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> lineage_subgraph.LineageSubgraph: - r"""Obtains the set of input and output Artifacts for - this Execution, in the form of LineageSubgraph that also - contains the Execution and connecting Events. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.QueryExecutionInputsAndOutputsRequest, dict]): - The request object. Request message for - [MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1beta1.MetadataService.QueryExecutionInputsAndOutputs]. - execution (:class:`str`): - Required. The resource name of the Execution whose input - and output Artifacts should be retrieved as a - LineageSubgraph. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - - This corresponds to the ``execution`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.LineageSubgraph: - A subgraph of the overall lineage - graph. Event edges connect Artifact and - Execution nodes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([execution]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.QueryExecutionInputsAndOutputsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if execution is not None: - request.execution = execution - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.query_execution_inputs_and_outputs, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("execution", request.execution), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def create_metadata_schema(self, - request: Union[metadata_service.CreateMetadataSchemaRequest, dict] = None, - *, - parent: str = None, - metadata_schema: gca_metadata_schema.MetadataSchema = None, - metadata_schema_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_metadata_schema.MetadataSchema: - r"""Creates a MetadataSchema. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateMetadataSchemaRequest, dict]): - The request object. Request message for - [MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataSchema]. - parent (:class:`str`): - Required. The resource name of the MetadataStore where - the MetadataSchema should be created. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - metadata_schema (:class:`google.cloud.aiplatform_v1beta1.types.MetadataSchema`): - Required. The MetadataSchema to - create. - - This corresponds to the ``metadata_schema`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - metadata_schema_id (:class:`str`): - The {metadata_schema} portion of the resource name with - the format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` - If not provided, the MetadataStore's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are ``/[a-z][0-9]-/``. Must be - unique across all MetadataSchemas in the parent - Location. (Otherwise the request will fail with - ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't - view the preexisting MetadataSchema.) 
- - This corresponds to the ``metadata_schema_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.MetadataSchema: - Instance of a general MetadataSchema. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, metadata_schema, metadata_schema_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.CreateMetadataSchemaRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if metadata_schema is not None: - request.metadata_schema = metadata_schema - if metadata_schema_id is not None: - request.metadata_schema_id = metadata_schema_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_metadata_schema, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def get_metadata_schema(self, - request: Union[metadata_service.GetMetadataSchemaRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_schema.MetadataSchema: - r"""Retrieves a specific MetadataSchema. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetMetadataSchemaRequest, dict]): - The request object. Request message for - [MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataSchema]. - name (:class:`str`): - Required. The resource name of the MetadataSchema to - retrieve. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.MetadataSchema: - Instance of a general MetadataSchema. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.GetMetadataSchemaRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_metadata_schema, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_metadata_schemas(self, - request: Union[metadata_service.ListMetadataSchemasRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListMetadataSchemasAsyncPager: - r"""Lists MetadataSchemas. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest, dict]): - The request object. Request message for - [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. - parent (:class:`str`): - Required. The MetadataStore whose MetadataSchemas should - be listed. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataSchemasAsyncPager: - Response message for - [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. 
- - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.ListMetadataSchemasRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_metadata_schemas, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListMetadataSchemasAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def query_artifact_lineage_subgraph(self, - request: Union[metadata_service.QueryArtifactLineageSubgraphRequest, dict] = None, - *, - artifact: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> lineage_subgraph.LineageSubgraph: - r"""Retrieves lineage of an Artifact represented through - Artifacts and Executions connected by Event edges and - returned as a LineageSubgraph. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.QueryArtifactLineageSubgraphRequest, dict]): - The request object. Request message for - [MetadataService.QueryArtifactLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryArtifactLineageSubgraph]. - artifact (:class:`str`): - Required. The resource name of the Artifact whose - Lineage needs to be retrieved as a LineageSubgraph. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - - The request may error with FAILED_PRECONDITION if the - number of Artifacts, the number of Executions, or the - number of Events that would be returned for the Context - exceeds 1000. - - This corresponds to the ``artifact`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.LineageSubgraph: - A subgraph of the overall lineage - graph. Event edges connect Artifact and - Execution nodes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([artifact]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = metadata_service.QueryArtifactLineageSubgraphRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if artifact is not None: - request.artifact = artifact - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.query_artifact_lineage_subgraph, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("artifact", request.artifact), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "MetadataServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py deleted file mode 100644 index 1d9f84a1b1..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/client.py +++ /dev/null @@ -1,3214 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.metadata_service import pagers -from google.cloud.aiplatform_v1beta1.types import artifact -from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact -from google.cloud.aiplatform_v1beta1.types import context -from google.cloud.aiplatform_v1beta1.types import context as gca_context -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import event -from google.cloud.aiplatform_v1beta1.types import execution -from google.cloud.aiplatform_v1beta1.types import execution as gca_execution -from google.cloud.aiplatform_v1beta1.types import lineage_subgraph -from google.cloud.aiplatform_v1beta1.types import metadata_schema -from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema -from google.cloud.aiplatform_v1beta1.types import metadata_service -from google.cloud.aiplatform_v1beta1.types import metadata_store -from 
google.cloud.aiplatform_v1beta1.types import metadata_store as gca_metadata_store -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import MetadataServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import MetadataServiceGrpcTransport -from .transports.grpc_asyncio import MetadataServiceGrpcAsyncIOTransport - - -class MetadataServiceClientMeta(type): - """Metaclass for the MetadataService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[MetadataServiceTransport]] - _transport_registry["grpc"] = MetadataServiceGrpcTransport - _transport_registry["grpc_asyncio"] = MetadataServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[MetadataServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class MetadataServiceClient(metaclass=MetadataServiceClientMeta): - """Service for reading and writing metadata entries.""" - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. 
- - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - MetadataServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - MetadataServiceClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> MetadataServiceTransport: - """Returns the transport used by the client instance. - - Returns: - MetadataServiceTransport: The transport used by the client - instance. - """ - return self._transport - - @staticmethod - def artifact_path(project: str,location: str,metadata_store: str,artifact: str,) -> str: - """Returns a fully-qualified artifact string.""" - return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, ) - - @staticmethod - def parse_artifact_path(path: str) -> Dict[str,str]: - """Parses a artifact path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/artifacts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def context_path(project: str,location: str,metadata_store: str,context: str,) -> str: - """Returns a fully-qualified context string.""" - return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, ) - - @staticmethod - def parse_context_path(path: str) -> Dict[str,str]: - """Parses a context path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/contexts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def execution_path(project: str,location: str,metadata_store: str,execution: str,) -> str: - """Returns a fully-qualified execution string.""" - return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, 
location=location, metadata_store=metadata_store, execution=execution, ) - - @staticmethod - def parse_execution_path(path: str) -> Dict[str,str]: - """Parses a execution path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/executions/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def metadata_schema_path(project: str,location: str,metadata_store: str,metadata_schema: str,) -> str: - """Returns a fully-qualified metadata_schema string.""" - return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}".format(project=project, location=location, metadata_store=metadata_store, metadata_schema=metadata_schema, ) - - @staticmethod - def parse_metadata_schema_path(path: str) -> Dict[str,str]: - """Parses a metadata_schema path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/metadataSchemas/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def metadata_store_path(project: str,location: str,metadata_store: str,) -> str: - """Returns a fully-qualified metadata_store string.""" - return "projects/{project}/locations/{location}/metadataStores/{metadata_store}".format(project=project, location=location, metadata_store=metadata_store, ) - - @staticmethod - def parse_metadata_store_path(path: str) -> Dict[str,str]: - """Parses a metadata_store path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component 
segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, MetadataServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, 
- client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the metadata service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, MetadataServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. 
- """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, MetadataServiceTransport): - # transport is a MetadataServiceTransport instance. 
- if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def create_metadata_store(self, - request: Union[metadata_service.CreateMetadataStoreRequest, dict] = None, - *, - parent: str = None, - metadata_store: gca_metadata_store.MetadataStore = None, - metadata_store_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Initializes a MetadataStore, including allocation of - resources. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateMetadataStoreRequest, dict]): - The request object. Request message for - [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore]. - parent (str): - Required. The resource name of the Location where the - MetadataStore should be created. Format: - ``projects/{project}/locations/{location}/`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - metadata_store (google.cloud.aiplatform_v1beta1.types.MetadataStore): - Required. The MetadataStore to - create. - - This corresponds to the ``metadata_store`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- metadata_store_id (str): - The {metadatastore} portion of the resource name with - the format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - If not provided, the MetadataStore's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are ``/[a-z][0-9]-/``. Must be - unique across all MetadataStores in the parent Location. - (Otherwise the request will fail with ALREADY_EXISTS, or - PERMISSION_DENIED if the caller can't view the - preexisting MetadataStore.) - - This corresponds to the ``metadata_store_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.MetadataStore` Instance of a metadata store. Contains a set of metadata that can be - queried. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, metadata_store, metadata_store_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.CreateMetadataStoreRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, metadata_service.CreateMetadataStoreRequest): - request = metadata_service.CreateMetadataStoreRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if metadata_store is not None: - request.metadata_store = metadata_store - if metadata_store_id is not None: - request.metadata_store_id = metadata_store_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_metadata_store] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_metadata_store.MetadataStore, - metadata_type=metadata_service.CreateMetadataStoreOperationMetadata, - ) - - # Done; return the response. - return response - - def get_metadata_store(self, - request: Union[metadata_service.GetMetadataStoreRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_store.MetadataStore: - r"""Retrieves a specific MetadataStore. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetMetadataStoreRequest, dict]): - The request object. Request message for - [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataStore]. - name (str): - Required. The resource name of the MetadataStore to - retrieve. 
Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.MetadataStore: - Instance of a metadata store. - Contains a set of metadata that can be - queried. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.GetMetadataStoreRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.GetMetadataStoreRequest): - request = metadata_service.GetMetadataStoreRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_metadata_store] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_metadata_stores(self, - request: Union[metadata_service.ListMetadataStoresRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListMetadataStoresPager: - r"""Lists MetadataStores for a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest, dict]): - The request object. Request message for - [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. - parent (str): - Required. The Location whose MetadataStores should be - listed. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataStoresPager: - Response message for - [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.ListMetadataStoresRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.ListMetadataStoresRequest): - request = metadata_service.ListMetadataStoresRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_metadata_stores] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListMetadataStoresPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_metadata_store(self, - request: Union[metadata_service.DeleteMetadataStoreRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a single MetadataStore and all its child - resources (Artifacts, Executions, and Contexts). 
- - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteMetadataStoreRequest, dict]): - The request object. Request message for - [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore]. - name (str): - Required. The resource name of the MetadataStore to - delete. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.DeleteMetadataStoreRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, metadata_service.DeleteMetadataStoreRequest): - request = metadata_service.DeleteMetadataStoreRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_metadata_store] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=metadata_service.DeleteMetadataStoreOperationMetadata, - ) - - # Done; return the response. - return response - - def create_artifact(self, - request: Union[metadata_service.CreateArtifactRequest, dict] = None, - *, - parent: str = None, - artifact: gca_artifact.Artifact = None, - artifact_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_artifact.Artifact: - r"""Creates an Artifact associated with a MetadataStore. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateArtifactRequest, dict]): - The request object. Request message for - [MetadataService.CreateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.CreateArtifact]. - parent (str): - Required. The resource name of the MetadataStore where - the Artifact should be created. 
Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - artifact (google.cloud.aiplatform_v1beta1.types.Artifact): - Required. The Artifact to create. - This corresponds to the ``artifact`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - artifact_id (str): - The {artifact} portion of the resource name with the - format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - If not provided, the Artifact's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are ``/[a-z][0-9]-/``. Must be - unique across all Artifacts in the parent MetadataStore. - (Otherwise the request will fail with ALREADY_EXISTS, or - PERMISSION_DENIED if the caller can't view the - preexisting Artifact.) - - This corresponds to the ``artifact_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Artifact: - Instance of a general artifact. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, artifact, artifact_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.CreateArtifactRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.CreateArtifactRequest): - request = metadata_service.CreateArtifactRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if artifact is not None: - request.artifact = artifact - if artifact_id is not None: - request.artifact_id = artifact_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_artifact] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_artifact(self, - request: Union[metadata_service.GetArtifactRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> artifact.Artifact: - r"""Retrieves a specific Artifact. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetArtifactRequest, dict]): - The request object. Request message for - [MetadataService.GetArtifact][google.cloud.aiplatform.v1beta1.MetadataService.GetArtifact]. - name (str): - Required. The resource name of the Artifact to retrieve. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Artifact: - Instance of a general artifact. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.GetArtifactRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.GetArtifactRequest): - request = metadata_service.GetArtifactRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_artifact] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_artifacts(self, - request: Union[metadata_service.ListArtifactsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListArtifactsPager: - r"""Lists Artifacts in the MetadataStore. 
- - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest, dict]): - The request object. Request message for - [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. - parent (str): - Required. The MetadataStore whose Artifacts should be - listed. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListArtifactsPager: - Response message for - [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.ListArtifactsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.ListArtifactsRequest): - request = metadata_service.ListArtifactsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_artifacts] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListArtifactsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_artifact(self, - request: Union[metadata_service.UpdateArtifactRequest, dict] = None, - *, - artifact: gca_artifact.Artifact = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_artifact.Artifact: - r"""Updates a stored Artifact. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateArtifactRequest, dict]): - The request object. Request message for - [MetadataService.UpdateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.UpdateArtifact]. - artifact (google.cloud.aiplatform_v1beta1.types.Artifact): - Required. The Artifact containing updates. The - Artifact's - [Artifact.name][google.cloud.aiplatform.v1beta1.Artifact.name] - field is used to identify the Artifact to be updated. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - - This corresponds to the ``artifact`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. 
A FieldMask indicating - which fields should be updated. - Functionality of this field is not yet - supported. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Artifact: - Instance of a general artifact. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([artifact, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.UpdateArtifactRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.UpdateArtifactRequest): - request = metadata_service.UpdateArtifactRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if artifact is not None: - request.artifact = artifact - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_artifact] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("artifact.name", request.artifact.name), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_artifact(self, - request: Union[metadata_service.DeleteArtifactRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes an Artifact. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteArtifactRequest, dict]): - The request object. Request message for - [MetadataService.DeleteArtifact][google.cloud.aiplatform.v1beta1.MetadataService.DeleteArtifact]. - name (str): - Required. The resource name of the Artifact to delete. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.DeleteArtifactRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.DeleteArtifactRequest): - request = metadata_service.DeleteArtifactRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_artifact] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def purge_artifacts(self, - request: Union[metadata_service.PurgeArtifactsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Purges Artifacts. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.PurgeArtifactsRequest, dict]): - The request object. Request message for - [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts]. 
- parent (str): - Required. The metadata store to purge Artifacts from. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.PurgeArtifactsResponse` - Response message for - [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.PurgeArtifactsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.PurgeArtifactsRequest): - request = metadata_service.PurgeArtifactsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.purge_artifacts] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - metadata_service.PurgeArtifactsResponse, - metadata_type=metadata_service.PurgeArtifactsMetadata, - ) - - # Done; return the response. - return response - - def create_context(self, - request: Union[metadata_service.CreateContextRequest, dict] = None, - *, - parent: str = None, - context: gca_context.Context = None, - context_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_context.Context: - r"""Creates a Context associated with a MetadataStore. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateContextRequest, dict]): - The request object. Request message for - [MetadataService.CreateContext][google.cloud.aiplatform.v1beta1.MetadataService.CreateContext]. - parent (str): - Required. The resource name of the MetadataStore where - the Context should be created. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - context (google.cloud.aiplatform_v1beta1.types.Context): - Required. The Context to create. - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- context_id (str): - The {context} portion of the resource name with the - format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}``. - If not provided, the Context's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are ``/[a-z][0-9]-/``. Must be - unique across all Contexts in the parent MetadataStore. - (Otherwise the request will fail with ALREADY_EXISTS, or - PERMISSION_DENIED if the caller can't view the - preexisting Context.) - - This corresponds to the ``context_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Context: - Instance of a general context. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, context, context_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.CreateContextRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.CreateContextRequest): - request = metadata_service.CreateContextRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - if context is not None: - request.context = context - if context_id is not None: - request.context_id = context_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_context] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_context(self, - request: Union[metadata_service.GetContextRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> context.Context: - r"""Retrieves a specific Context. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetContextRequest, dict]): - The request object. Request message for - [MetadataService.GetContext][google.cloud.aiplatform.v1beta1.MetadataService.GetContext]. - name (str): - Required. The resource name of the Context to retrieve. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Context: - Instance of a general context. - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.GetContextRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.GetContextRequest): - request = metadata_service.GetContextRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_context] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_contexts(self, - request: Union[metadata_service.ListContextsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListContextsPager: - r"""Lists Contexts on the MetadataStore. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListContextsRequest, dict]): - The request object. Request message for - [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts] - parent (str): - Required. The MetadataStore whose Contexts should be - listed. 
Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListContextsPager: - Response message for - [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.ListContextsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.ListContextsRequest): - request = metadata_service.ListContextsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_contexts] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListContextsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_context(self, - request: Union[metadata_service.UpdateContextRequest, dict] = None, - *, - context: gca_context.Context = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_context.Context: - r"""Updates a stored Context. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateContextRequest, dict]): - The request object. Request message for - [MetadataService.UpdateContext][google.cloud.aiplatform.v1beta1.MetadataService.UpdateContext]. - context (google.cloud.aiplatform_v1beta1.types.Context): - Required. The Context containing updates. The Context's - [Context.name][google.cloud.aiplatform.v1beta1.Context.name] - field is used to identify the Context to be updated. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A FieldMask indicating - which fields should be updated. - Functionality of this field is not yet - supported. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Context: - Instance of a general context. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([context, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.UpdateContextRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.UpdateContextRequest): - request = metadata_service.UpdateContextRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if context is not None: - request.context = context - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_context] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("context.name", request.context.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def delete_context(self, - request: Union[metadata_service.DeleteContextRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a stored Context. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteContextRequest, dict]): - The request object. Request message for - [MetadataService.DeleteContext][google.cloud.aiplatform.v1beta1.MetadataService.DeleteContext]. - name (str): - Required. The resource name of the Context to delete. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.DeleteContextRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.DeleteContextRequest): - request = metadata_service.DeleteContextRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_context] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def purge_contexts(self, - request: Union[metadata_service.PurgeContextsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Purges Contexts. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.PurgeContextsRequest, dict]): - The request object. Request message for - [MetadataService.PurgeContexts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts]. 
- parent (str): - Required. The metadata store to purge Contexts from. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.PurgeContextsResponse` - Response message for - [MetadataService.PurgeContexts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.PurgeContextsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.PurgeContextsRequest): - request = metadata_service.PurgeContextsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.purge_contexts] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - metadata_service.PurgeContextsResponse, - metadata_type=metadata_service.PurgeContextsMetadata, - ) - - # Done; return the response. - return response - - def add_context_artifacts_and_executions(self, - request: Union[metadata_service.AddContextArtifactsAndExecutionsRequest, dict] = None, - *, - context: str = None, - artifacts: Sequence[str] = None, - executions: Sequence[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_service.AddContextArtifactsAndExecutionsResponse: - r"""Adds a set of Artifacts and Executions to a Context. - If any of the Artifacts or Executions have already been - added to a Context, they are simply skipped. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsRequest, dict]): - The request object. Request message for - [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. - context (str): - Required. The resource name of the Context that the - Artifacts and Executions belong to. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- artifacts (Sequence[str]): - The resource names of the Artifacts to attribute to the - Context. - - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - - This corresponds to the ``artifacts`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - executions (Sequence[str]): - The resource names of the Executions to associate with - the Context. - - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - - This corresponds to the ``executions`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsResponse: - Response message for - [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([context, artifacts, executions]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.AddContextArtifactsAndExecutionsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, metadata_service.AddContextArtifactsAndExecutionsRequest): - request = metadata_service.AddContextArtifactsAndExecutionsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if context is not None: - request.context = context - if artifacts is not None: - request.artifacts = artifacts - if executions is not None: - request.executions = executions - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.add_context_artifacts_and_executions] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("context", request.context), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def add_context_children(self, - request: Union[metadata_service.AddContextChildrenRequest, dict] = None, - *, - context: str = None, - child_contexts: Sequence[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_service.AddContextChildrenResponse: - r"""Adds a set of Contexts as children to a parent Context. If any - of the child Contexts have already been added to the parent - Context, they are simply skipped. If this call would create a - cycle or cause any Context to have more than 10 parents, the - request will fail with an INVALID_ARGUMENT error. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.AddContextChildrenRequest, dict]): - The request object. Request message for - [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. - context (str): - Required. The resource name of the parent Context. 
- - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - child_contexts (Sequence[str]): - The resource names of the child - Contexts. - - This corresponds to the ``child_contexts`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.AddContextChildrenResponse: - Response message for - [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([context, child_contexts]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.AddContextChildrenRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.AddContextChildrenRequest): - request = metadata_service.AddContextChildrenRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if context is not None: - request.context = context - if child_contexts is not None: - request.child_contexts = child_contexts - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.add_context_children] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("context", request.context), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def query_context_lineage_subgraph(self, - request: Union[metadata_service.QueryContextLineageSubgraphRequest, dict] = None, - *, - context: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> lineage_subgraph.LineageSubgraph: - r"""Retrieves Artifacts and Executions within the - specified Context, connected by Event edges and returned - as a LineageSubgraph. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.QueryContextLineageSubgraphRequest, dict]): - The request object. Request message for - [MetadataService.QueryContextLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryContextLineageSubgraph]. - context (str): - Required. The resource name of the Context whose - Artifacts and Executions should be retrieved as a - LineageSubgraph. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - - The request may error with FAILED_PRECONDITION if the - number of Artifacts, the number of Executions, or the - number of Events that would be returned for the Context - exceeds 1000. - - This corresponds to the ``context`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.LineageSubgraph: - A subgraph of the overall lineage - graph. Event edges connect Artifact and - Execution nodes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([context]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.QueryContextLineageSubgraphRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.QueryContextLineageSubgraphRequest): - request = metadata_service.QueryContextLineageSubgraphRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if context is not None: - request.context = context - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.query_context_lineage_subgraph] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("context", request.context), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def create_execution(self, - request: Union[metadata_service.CreateExecutionRequest, dict] = None, - *, - parent: str = None, - execution: gca_execution.Execution = None, - execution_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_execution.Execution: - r"""Creates an Execution associated with a MetadataStore. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateExecutionRequest, dict]): - The request object. Request message for - [MetadataService.CreateExecution][google.cloud.aiplatform.v1beta1.MetadataService.CreateExecution]. - parent (str): - Required. The resource name of the MetadataStore where - the Execution should be created. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - execution (google.cloud.aiplatform_v1beta1.types.Execution): - Required. The Execution to create. - This corresponds to the ``execution`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - execution_id (str): - The {execution} portion of the resource name with the - format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - If not provided, the Execution's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are ``/[a-z][0-9]-/``. Must be - unique across all Executions in the parent - MetadataStore. (Otherwise the request will fail with - ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't - view the preexisting Execution.) - - This corresponds to the ``execution_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Execution: - Instance of a general execution. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, execution, execution_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.CreateExecutionRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.CreateExecutionRequest): - request = metadata_service.CreateExecutionRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if execution is not None: - request.execution = execution - if execution_id is not None: - request.execution_id = execution_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_execution] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def get_execution(self, - request: Union[metadata_service.GetExecutionRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> execution.Execution: - r"""Retrieves a specific Execution. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetExecutionRequest, dict]): - The request object. Request message for - [MetadataService.GetExecution][google.cloud.aiplatform.v1beta1.MetadataService.GetExecution]. - name (str): - Required. The resource name of the Execution to - retrieve. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Execution: - Instance of a general execution. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.GetExecutionRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, metadata_service.GetExecutionRequest): - request = metadata_service.GetExecutionRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_execution] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_executions(self, - request: Union[metadata_service.ListExecutionsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListExecutionsPager: - r"""Lists Executions in the MetadataStore. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest, dict]): - The request object. Request message for - [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. - parent (str): - Required. The MetadataStore whose Executions should be - listed. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListExecutionsPager: - Response message for - [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.ListExecutionsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.ListExecutionsRequest): - request = metadata_service.ListExecutionsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_executions] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListExecutionsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def update_execution(self, - request: Union[metadata_service.UpdateExecutionRequest, dict] = None, - *, - execution: gca_execution.Execution = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_execution.Execution: - r"""Updates a stored Execution. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateExecutionRequest, dict]): - The request object. Request message for - [MetadataService.UpdateExecution][google.cloud.aiplatform.v1beta1.MetadataService.UpdateExecution]. - execution (google.cloud.aiplatform_v1beta1.types.Execution): - Required. The Execution containing updates. The - Execution's - [Execution.name][google.cloud.aiplatform.v1beta1.Execution.name] - field is used to identify the Execution to be updated. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - - This corresponds to the ``execution`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A FieldMask indicating - which fields should be updated. - Functionality of this field is not yet - supported. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Execution: - Instance of a general execution. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([execution, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.UpdateExecutionRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.UpdateExecutionRequest): - request = metadata_service.UpdateExecutionRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if execution is not None: - request.execution = execution - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_execution] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("execution.name", request.execution.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_execution(self, - request: Union[metadata_service.DeleteExecutionRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes an Execution. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteExecutionRequest, dict]): - The request object. Request message for - [MetadataService.DeleteExecution][google.cloud.aiplatform.v1beta1.MetadataService.DeleteExecution]. - name (str): - Required. The resource name of the Execution to delete. 
- Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.DeleteExecutionRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.DeleteExecutionRequest): - request = metadata_service.DeleteExecutionRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_execution] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def purge_executions(self, - request: Union[metadata_service.PurgeExecutionsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Purges Executions. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.PurgeExecutionsRequest, dict]): - The request object. Request message for - [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions]. - parent (str): - Required. The metadata store to purge Executions from. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
- - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.PurgeExecutionsResponse` - Response message for - [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.PurgeExecutionsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.PurgeExecutionsRequest): - request = metadata_service.PurgeExecutionsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.purge_executions] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - metadata_service.PurgeExecutionsResponse, - metadata_type=metadata_service.PurgeExecutionsMetadata, - ) - - # Done; return the response. 
- return response - - def add_execution_events(self, - request: Union[metadata_service.AddExecutionEventsRequest, dict] = None, - *, - execution: str = None, - events: Sequence[event.Event] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_service.AddExecutionEventsResponse: - r"""Adds Events to the specified Execution. An Event - indicates whether an Artifact was used as an input or - output for an Execution. If an Event already exists - between the Execution and the Artifact, the Event is - skipped. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.AddExecutionEventsRequest, dict]): - The request object. Request message for - [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. - execution (str): - Required. The resource name of the Execution that the - Events connect Artifacts with. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - - This corresponds to the ``execution`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - events (Sequence[google.cloud.aiplatform_v1beta1.types.Event]): - The Events to create and add. - This corresponds to the ``events`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.AddExecutionEventsResponse: - Response message for - [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([execution, events]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.AddExecutionEventsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.AddExecutionEventsRequest): - request = metadata_service.AddExecutionEventsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if execution is not None: - request.execution = execution - if events is not None: - request.events = events - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.add_execution_events] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("execution", request.execution), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def query_execution_inputs_and_outputs(self, - request: Union[metadata_service.QueryExecutionInputsAndOutputsRequest, dict] = None, - *, - execution: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> lineage_subgraph.LineageSubgraph: - r"""Obtains the set of input and output Artifacts for - this Execution, in the form of LineageSubgraph that also - contains the Execution and connecting Events. 
- - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.QueryExecutionInputsAndOutputsRequest, dict]): - The request object. Request message for - [MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1beta1.MetadataService.QueryExecutionInputsAndOutputs]. - execution (str): - Required. The resource name of the Execution whose input - and output Artifacts should be retrieved as a - LineageSubgraph. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - - This corresponds to the ``execution`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.LineageSubgraph: - A subgraph of the overall lineage - graph. Event edges connect Artifact and - Execution nodes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([execution]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.QueryExecutionInputsAndOutputsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.QueryExecutionInputsAndOutputsRequest): - request = metadata_service.QueryExecutionInputsAndOutputsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if execution is not None: - request.execution = execution - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.query_execution_inputs_and_outputs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("execution", request.execution), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def create_metadata_schema(self, - request: Union[metadata_service.CreateMetadataSchemaRequest, dict] = None, - *, - parent: str = None, - metadata_schema: gca_metadata_schema.MetadataSchema = None, - metadata_schema_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_metadata_schema.MetadataSchema: - r"""Creates a MetadataSchema. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateMetadataSchemaRequest, dict]): - The request object. Request message for - [MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataSchema]. - parent (str): - Required. The resource name of the MetadataStore where - the MetadataSchema should be created. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - metadata_schema (google.cloud.aiplatform_v1beta1.types.MetadataSchema): - Required. The MetadataSchema to - create. - - This corresponds to the ``metadata_schema`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- metadata_schema_id (str): - The {metadata_schema} portion of the resource name with - the format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` - If not provided, the MetadataStore's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are ``/[a-z][0-9]-/``. Must be - unique across all MetadataSchemas in the parent - Location. (Otherwise the request will fail with - ALREADY_EXISTS, or PERMISSION_DENIED if the caller can't - view the preexisting MetadataSchema.) - - This corresponds to the ``metadata_schema_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.MetadataSchema: - Instance of a general MetadataSchema. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, metadata_schema, metadata_schema_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.CreateMetadataSchemaRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.CreateMetadataSchemaRequest): - request = metadata_service.CreateMetadataSchemaRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - if metadata_schema is not None: - request.metadata_schema = metadata_schema - if metadata_schema_id is not None: - request.metadata_schema_id = metadata_schema_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_metadata_schema] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_metadata_schema(self, - request: Union[metadata_service.GetMetadataSchemaRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> metadata_schema.MetadataSchema: - r"""Retrieves a specific MetadataSchema. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetMetadataSchemaRequest, dict]): - The request object. Request message for - [MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataSchema]. - name (str): - Required. The resource name of the MetadataSchema to - retrieve. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.types.MetadataSchema: - Instance of a general MetadataSchema. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.GetMetadataSchemaRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.GetMetadataSchemaRequest): - request = metadata_service.GetMetadataSchemaRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_metadata_schema] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_metadata_schemas(self, - request: Union[metadata_service.ListMetadataSchemasRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListMetadataSchemasPager: - r"""Lists MetadataSchemas. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest, dict]): - The request object. 
Request message for - [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. - parent (str): - Required. The MetadataStore whose MetadataSchemas should - be listed. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataSchemasPager: - Response message for - [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.ListMetadataSchemasRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.ListMetadataSchemasRequest): - request = metadata_service.ListMetadataSchemasRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_metadata_schemas] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListMetadataSchemasPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def query_artifact_lineage_subgraph(self, - request: Union[metadata_service.QueryArtifactLineageSubgraphRequest, dict] = None, - *, - artifact: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> lineage_subgraph.LineageSubgraph: - r"""Retrieves lineage of an Artifact represented through - Artifacts and Executions connected by Event edges and - returned as a LineageSubgraph. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.QueryArtifactLineageSubgraphRequest, dict]): - The request object. Request message for - [MetadataService.QueryArtifactLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryArtifactLineageSubgraph]. - artifact (str): - Required. The resource name of the Artifact whose - Lineage needs to be retrieved as a LineageSubgraph. 
- Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - - The request may error with FAILED_PRECONDITION if the - number of Artifacts, the number of Executions, or the - number of Events that would be returned for the Context - exceeds 1000. - - This corresponds to the ``artifact`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.LineageSubgraph: - A subgraph of the overall lineage - graph. Event edges connect Artifact and - Execution nodes. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([artifact]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a metadata_service.QueryArtifactLineageSubgraphRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, metadata_service.QueryArtifactLineageSubgraphRequest): - request = metadata_service.QueryArtifactLineageSubgraphRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if artifact is not None: - request.artifact = artifact - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.query_artifact_lineage_subgraph] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("artifact", request.artifact), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! - """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "MetadataServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py deleted file mode 100644 index d95b81f928..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/pagers.py +++ /dev/null @@ -1,633 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.aiplatform_v1beta1.types import artifact -from google.cloud.aiplatform_v1beta1.types import context -from google.cloud.aiplatform_v1beta1.types import execution -from google.cloud.aiplatform_v1beta1.types import metadata_schema -from google.cloud.aiplatform_v1beta1.types import metadata_service -from google.cloud.aiplatform_v1beta1.types import metadata_store - - -class ListMetadataStoresPager: - """A pager for iterating through ``list_metadata_stores`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse` object, and - provides an ``__iter__`` method to iterate through its - ``metadata_stores`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListMetadataStores`` requests and continue to iterate - through the ``metadata_stores`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., metadata_service.ListMetadataStoresResponse], - request: metadata_service.ListMetadataStoresRequest, - response: metadata_service.ListMetadataStoresResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. 
- - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = metadata_service.ListMetadataStoresRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[metadata_service.ListMetadataStoresResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[metadata_store.MetadataStore]: - for page in self.pages: - yield from page.metadata_stores - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListMetadataStoresAsyncPager: - """A pager for iterating through ``list_metadata_stores`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``metadata_stores`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListMetadataStores`` requests and continue to iterate - through the ``metadata_stores`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., Awaitable[metadata_service.ListMetadataStoresResponse]], - request: metadata_service.ListMetadataStoresRequest, - response: metadata_service.ListMetadataStoresResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListMetadataStoresResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = metadata_service.ListMetadataStoresRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[metadata_service.ListMetadataStoresResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[metadata_store.MetadataStore]: - async def async_generator(): - async for page in self.pages: - for response in page.metadata_stores: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListArtifactsPager: - """A pager for iterating through ``list_artifacts`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``artifacts`` field. 
- - If there are more pages, the ``__iter__`` method will make additional - ``ListArtifacts`` requests and continue to iterate - through the ``artifacts`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., metadata_service.ListArtifactsResponse], - request: metadata_service.ListArtifactsRequest, - response: metadata_service.ListArtifactsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = metadata_service.ListArtifactsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[metadata_service.ListArtifactsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[artifact.Artifact]: - for page in self.pages: - yield from page.artifacts - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListArtifactsAsyncPager: - """A pager for iterating through ``list_artifacts`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``artifacts`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListArtifacts`` requests and continue to iterate - through the ``artifacts`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[metadata_service.ListArtifactsResponse]], - request: metadata_service.ListArtifactsRequest, - response: metadata_service.ListArtifactsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListArtifactsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = metadata_service.ListArtifactsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[metadata_service.ListArtifactsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[artifact.Artifact]: - async def async_generator(): - async for page in self.pages: - for response in page.artifacts: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListContextsPager: - """A pager for iterating through ``list_contexts`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListContextsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``contexts`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListContexts`` requests and continue to iterate - through the ``contexts`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListContextsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., metadata_service.ListContextsResponse], - request: metadata_service.ListContextsRequest, - response: metadata_service.ListContextsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. 
- request (google.cloud.aiplatform_v1beta1.types.ListContextsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListContextsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = metadata_service.ListContextsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[metadata_service.ListContextsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[context.Context]: - for page in self.pages: - yield from page.contexts - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListContextsAsyncPager: - """A pager for iterating through ``list_contexts`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListContextsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``contexts`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListContexts`` requests and continue to iterate - through the ``contexts`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListContextsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., Awaitable[metadata_service.ListContextsResponse]], - request: metadata_service.ListContextsRequest, - response: metadata_service.ListContextsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListContextsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListContextsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = metadata_service.ListContextsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[metadata_service.ListContextsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[context.Context]: - async def async_generator(): - async for page in self.pages: - for response in page.contexts: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListExecutionsPager: - """A pager for iterating through ``list_executions`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``executions`` field. 
- - If there are more pages, the ``__iter__`` method will make additional - ``ListExecutions`` requests and continue to iterate - through the ``executions`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., metadata_service.ListExecutionsResponse], - request: metadata_service.ListExecutionsRequest, - response: metadata_service.ListExecutionsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = metadata_service.ListExecutionsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[metadata_service.ListExecutionsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[execution.Execution]: - for page in self.pages: - yield from page.executions - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListExecutionsAsyncPager: - """A pager for iterating through ``list_executions`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``executions`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListExecutions`` requests and continue to iterate - through the ``executions`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[metadata_service.ListExecutionsResponse]], - request: metadata_service.ListExecutionsRequest, - response: metadata_service.ListExecutionsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListExecutionsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = metadata_service.ListExecutionsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[metadata_service.ListExecutionsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[execution.Execution]: - async def async_generator(): - async for page in self.pages: - for response in page.executions: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListMetadataSchemasPager: - """A pager for iterating through ``list_metadata_schemas`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse` object, and - provides an ``__iter__`` method to iterate through its - ``metadata_schemas`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListMetadataSchemas`` requests and continue to iterate - through the ``metadata_schemas`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., metadata_service.ListMetadataSchemasResponse], - request: metadata_service.ListMetadataSchemasRequest, - response: metadata_service.ListMetadataSchemasResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. 
- - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = metadata_service.ListMetadataSchemasRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[metadata_service.ListMetadataSchemasResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[metadata_schema.MetadataSchema]: - for page in self.pages: - yield from page.metadata_schemas - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListMetadataSchemasAsyncPager: - """A pager for iterating through ``list_metadata_schemas`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``metadata_schemas`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListMetadataSchemas`` requests and continue to iterate - through the ``metadata_schemas`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., Awaitable[metadata_service.ListMetadataSchemasResponse]], - request: metadata_service.ListMetadataSchemasRequest, - response: metadata_service.ListMetadataSchemasResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = metadata_service.ListMetadataSchemasRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[metadata_service.ListMetadataSchemasResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[metadata_schema.MetadataSchema]: - async def async_generator(): - async for page in self.pages: - for response in page.metadata_schemas: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/__init__.py deleted file mode 100644 index 688ce8218c..0000000000 --- 
a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import MetadataServiceTransport -from .grpc import MetadataServiceGrpcTransport -from .grpc_asyncio import MetadataServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[MetadataServiceTransport]] -_transport_registry['grpc'] = MetadataServiceGrpcTransport -_transport_registry['grpc_asyncio'] = MetadataServiceGrpcAsyncIOTransport - -__all__ = ( - 'MetadataServiceTransport', - 'MetadataServiceGrpcTransport', - 'MetadataServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py deleted file mode 100644 index 3fa8a0f19f..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/base.py +++ /dev/null @@ -1,583 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import artifact -from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact -from google.cloud.aiplatform_v1beta1.types import context -from google.cloud.aiplatform_v1beta1.types import context as gca_context -from google.cloud.aiplatform_v1beta1.types import execution -from google.cloud.aiplatform_v1beta1.types import execution as gca_execution -from google.cloud.aiplatform_v1beta1.types import lineage_subgraph -from google.cloud.aiplatform_v1beta1.types import metadata_schema -from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema -from google.cloud.aiplatform_v1beta1.types import metadata_service -from google.cloud.aiplatform_v1beta1.types import metadata_store -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - 
-class MetadataServiceTransport(abc.ABC): - """Abstract transport class for MetadataService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. 
- self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
- self._wrapped_methods = { - self.create_metadata_store: gapic_v1.method.wrap_method( - self.create_metadata_store, - default_timeout=5.0, - client_info=client_info, - ), - self.get_metadata_store: gapic_v1.method.wrap_method( - self.get_metadata_store, - default_timeout=5.0, - client_info=client_info, - ), - self.list_metadata_stores: gapic_v1.method.wrap_method( - self.list_metadata_stores, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_metadata_store: gapic_v1.method.wrap_method( - self.delete_metadata_store, - default_timeout=5.0, - client_info=client_info, - ), - self.create_artifact: gapic_v1.method.wrap_method( - self.create_artifact, - default_timeout=5.0, - client_info=client_info, - ), - self.get_artifact: gapic_v1.method.wrap_method( - self.get_artifact, - default_timeout=5.0, - client_info=client_info, - ), - self.list_artifacts: gapic_v1.method.wrap_method( - self.list_artifacts, - default_timeout=5.0, - client_info=client_info, - ), - self.update_artifact: gapic_v1.method.wrap_method( - self.update_artifact, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_artifact: gapic_v1.method.wrap_method( - self.delete_artifact, - default_timeout=None, - client_info=client_info, - ), - self.purge_artifacts: gapic_v1.method.wrap_method( - self.purge_artifacts, - default_timeout=None, - client_info=client_info, - ), - self.create_context: gapic_v1.method.wrap_method( - self.create_context, - default_timeout=5.0, - client_info=client_info, - ), - self.get_context: gapic_v1.method.wrap_method( - self.get_context, - default_timeout=5.0, - client_info=client_info, - ), - self.list_contexts: gapic_v1.method.wrap_method( - self.list_contexts, - default_timeout=5.0, - client_info=client_info, - ), - self.update_context: gapic_v1.method.wrap_method( - self.update_context, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_context: gapic_v1.method.wrap_method( - self.delete_context, - default_timeout=5.0, - 
client_info=client_info, - ), - self.purge_contexts: gapic_v1.method.wrap_method( - self.purge_contexts, - default_timeout=None, - client_info=client_info, - ), - self.add_context_artifacts_and_executions: gapic_v1.method.wrap_method( - self.add_context_artifacts_and_executions, - default_timeout=5.0, - client_info=client_info, - ), - self.add_context_children: gapic_v1.method.wrap_method( - self.add_context_children, - default_timeout=5.0, - client_info=client_info, - ), - self.query_context_lineage_subgraph: gapic_v1.method.wrap_method( - self.query_context_lineage_subgraph, - default_timeout=5.0, - client_info=client_info, - ), - self.create_execution: gapic_v1.method.wrap_method( - self.create_execution, - default_timeout=5.0, - client_info=client_info, - ), - self.get_execution: gapic_v1.method.wrap_method( - self.get_execution, - default_timeout=5.0, - client_info=client_info, - ), - self.list_executions: gapic_v1.method.wrap_method( - self.list_executions, - default_timeout=5.0, - client_info=client_info, - ), - self.update_execution: gapic_v1.method.wrap_method( - self.update_execution, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_execution: gapic_v1.method.wrap_method( - self.delete_execution, - default_timeout=None, - client_info=client_info, - ), - self.purge_executions: gapic_v1.method.wrap_method( - self.purge_executions, - default_timeout=None, - client_info=client_info, - ), - self.add_execution_events: gapic_v1.method.wrap_method( - self.add_execution_events, - default_timeout=5.0, - client_info=client_info, - ), - self.query_execution_inputs_and_outputs: gapic_v1.method.wrap_method( - self.query_execution_inputs_and_outputs, - default_timeout=5.0, - client_info=client_info, - ), - self.create_metadata_schema: gapic_v1.method.wrap_method( - self.create_metadata_schema, - default_timeout=5.0, - client_info=client_info, - ), - self.get_metadata_schema: gapic_v1.method.wrap_method( - self.get_metadata_schema, - 
default_timeout=5.0, - client_info=client_info, - ), - self.list_metadata_schemas: gapic_v1.method.wrap_method( - self.list_metadata_schemas, - default_timeout=5.0, - client_info=client_info, - ), - self.query_artifact_lineage_subgraph: gapic_v1.method.wrap_method( - self.query_artifact_lineage_subgraph, - default_timeout=None, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! - """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_metadata_store(self) -> Callable[ - [metadata_service.CreateMetadataStoreRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_metadata_store(self) -> Callable[ - [metadata_service.GetMetadataStoreRequest], - Union[ - metadata_store.MetadataStore, - Awaitable[metadata_store.MetadataStore] - ]]: - raise NotImplementedError() - - @property - def list_metadata_stores(self) -> Callable[ - [metadata_service.ListMetadataStoresRequest], - Union[ - metadata_service.ListMetadataStoresResponse, - Awaitable[metadata_service.ListMetadataStoresResponse] - ]]: - raise NotImplementedError() - - @property - def delete_metadata_store(self) -> Callable[ - [metadata_service.DeleteMetadataStoreRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def create_artifact(self) -> Callable[ - [metadata_service.CreateArtifactRequest], - Union[ - gca_artifact.Artifact, - Awaitable[gca_artifact.Artifact] - ]]: - raise NotImplementedError() - - @property - def get_artifact(self) -> Callable[ - [metadata_service.GetArtifactRequest], - Union[ - 
artifact.Artifact, - Awaitable[artifact.Artifact] - ]]: - raise NotImplementedError() - - @property - def list_artifacts(self) -> Callable[ - [metadata_service.ListArtifactsRequest], - Union[ - metadata_service.ListArtifactsResponse, - Awaitable[metadata_service.ListArtifactsResponse] - ]]: - raise NotImplementedError() - - @property - def update_artifact(self) -> Callable[ - [metadata_service.UpdateArtifactRequest], - Union[ - gca_artifact.Artifact, - Awaitable[gca_artifact.Artifact] - ]]: - raise NotImplementedError() - - @property - def delete_artifact(self) -> Callable[ - [metadata_service.DeleteArtifactRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def purge_artifacts(self) -> Callable[ - [metadata_service.PurgeArtifactsRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def create_context(self) -> Callable[ - [metadata_service.CreateContextRequest], - Union[ - gca_context.Context, - Awaitable[gca_context.Context] - ]]: - raise NotImplementedError() - - @property - def get_context(self) -> Callable[ - [metadata_service.GetContextRequest], - Union[ - context.Context, - Awaitable[context.Context] - ]]: - raise NotImplementedError() - - @property - def list_contexts(self) -> Callable[ - [metadata_service.ListContextsRequest], - Union[ - metadata_service.ListContextsResponse, - Awaitable[metadata_service.ListContextsResponse] - ]]: - raise NotImplementedError() - - @property - def update_context(self) -> Callable[ - [metadata_service.UpdateContextRequest], - Union[ - gca_context.Context, - Awaitable[gca_context.Context] - ]]: - raise NotImplementedError() - - @property - def delete_context(self) -> Callable[ - [metadata_service.DeleteContextRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def 
purge_contexts(self) -> Callable[ - [metadata_service.PurgeContextsRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def add_context_artifacts_and_executions(self) -> Callable[ - [metadata_service.AddContextArtifactsAndExecutionsRequest], - Union[ - metadata_service.AddContextArtifactsAndExecutionsResponse, - Awaitable[metadata_service.AddContextArtifactsAndExecutionsResponse] - ]]: - raise NotImplementedError() - - @property - def add_context_children(self) -> Callable[ - [metadata_service.AddContextChildrenRequest], - Union[ - metadata_service.AddContextChildrenResponse, - Awaitable[metadata_service.AddContextChildrenResponse] - ]]: - raise NotImplementedError() - - @property - def query_context_lineage_subgraph(self) -> Callable[ - [metadata_service.QueryContextLineageSubgraphRequest], - Union[ - lineage_subgraph.LineageSubgraph, - Awaitable[lineage_subgraph.LineageSubgraph] - ]]: - raise NotImplementedError() - - @property - def create_execution(self) -> Callable[ - [metadata_service.CreateExecutionRequest], - Union[ - gca_execution.Execution, - Awaitable[gca_execution.Execution] - ]]: - raise NotImplementedError() - - @property - def get_execution(self) -> Callable[ - [metadata_service.GetExecutionRequest], - Union[ - execution.Execution, - Awaitable[execution.Execution] - ]]: - raise NotImplementedError() - - @property - def list_executions(self) -> Callable[ - [metadata_service.ListExecutionsRequest], - Union[ - metadata_service.ListExecutionsResponse, - Awaitable[metadata_service.ListExecutionsResponse] - ]]: - raise NotImplementedError() - - @property - def update_execution(self) -> Callable[ - [metadata_service.UpdateExecutionRequest], - Union[ - gca_execution.Execution, - Awaitable[gca_execution.Execution] - ]]: - raise NotImplementedError() - - @property - def delete_execution(self) -> Callable[ - [metadata_service.DeleteExecutionRequest], - Union[ - 
operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def purge_executions(self) -> Callable[ - [metadata_service.PurgeExecutionsRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def add_execution_events(self) -> Callable[ - [metadata_service.AddExecutionEventsRequest], - Union[ - metadata_service.AddExecutionEventsResponse, - Awaitable[metadata_service.AddExecutionEventsResponse] - ]]: - raise NotImplementedError() - - @property - def query_execution_inputs_and_outputs(self) -> Callable[ - [metadata_service.QueryExecutionInputsAndOutputsRequest], - Union[ - lineage_subgraph.LineageSubgraph, - Awaitable[lineage_subgraph.LineageSubgraph] - ]]: - raise NotImplementedError() - - @property - def create_metadata_schema(self) -> Callable[ - [metadata_service.CreateMetadataSchemaRequest], - Union[ - gca_metadata_schema.MetadataSchema, - Awaitable[gca_metadata_schema.MetadataSchema] - ]]: - raise NotImplementedError() - - @property - def get_metadata_schema(self) -> Callable[ - [metadata_service.GetMetadataSchemaRequest], - Union[ - metadata_schema.MetadataSchema, - Awaitable[metadata_schema.MetadataSchema] - ]]: - raise NotImplementedError() - - @property - def list_metadata_schemas(self) -> Callable[ - [metadata_service.ListMetadataSchemasRequest], - Union[ - metadata_service.ListMetadataSchemasResponse, - Awaitable[metadata_service.ListMetadataSchemasResponse] - ]]: - raise NotImplementedError() - - @property - def query_artifact_lineage_subgraph(self) -> Callable[ - [metadata_service.QueryArtifactLineageSubgraphRequest], - Union[ - lineage_subgraph.LineageSubgraph, - Awaitable[lineage_subgraph.LineageSubgraph] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'MetadataServiceTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py 
b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py deleted file mode 100644 index e059fd5880..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc.py +++ /dev/null @@ -1,1084 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1beta1.types import artifact -from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact -from google.cloud.aiplatform_v1beta1.types import context -from google.cloud.aiplatform_v1beta1.types import context as gca_context -from google.cloud.aiplatform_v1beta1.types import execution -from google.cloud.aiplatform_v1beta1.types import execution as gca_execution -from google.cloud.aiplatform_v1beta1.types import lineage_subgraph -from google.cloud.aiplatform_v1beta1.types import metadata_schema -from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema -from 
google.cloud.aiplatform_v1beta1.types import metadata_service -from google.cloud.aiplatform_v1beta1.types import metadata_store -from google.longrunning import operations_pb2 # type: ignore -from .base import MetadataServiceTransport, DEFAULT_CLIENT_INFO - - -class MetadataServiceGrpcTransport(MetadataServiceTransport): - """gRPC backend transport for MetadataService. - - Service for reading and writing metadata entries. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. 
- scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. 
This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. 
- """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_metadata_store(self) -> Callable[ - [metadata_service.CreateMetadataStoreRequest], - operations_pb2.Operation]: - r"""Return a callable for the create metadata store method over gRPC. - - Initializes a MetadataStore, including allocation of - resources. - - Returns: - Callable[[~.CreateMetadataStoreRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_metadata_store' not in self._stubs: - self._stubs['create_metadata_store'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/CreateMetadataStore', - request_serializer=metadata_service.CreateMetadataStoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_metadata_store'] - - @property - def get_metadata_store(self) -> Callable[ - [metadata_service.GetMetadataStoreRequest], - metadata_store.MetadataStore]: - r"""Return a callable for the get metadata store method over gRPC. - - Retrieves a specific MetadataStore. - - Returns: - Callable[[~.GetMetadataStoreRequest], - ~.MetadataStore]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_metadata_store' not in self._stubs: - self._stubs['get_metadata_store'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/GetMetadataStore', - request_serializer=metadata_service.GetMetadataStoreRequest.serialize, - response_deserializer=metadata_store.MetadataStore.deserialize, - ) - return self._stubs['get_metadata_store'] - - @property - def list_metadata_stores(self) -> Callable[ - [metadata_service.ListMetadataStoresRequest], - metadata_service.ListMetadataStoresResponse]: - r"""Return a callable for the list metadata stores method over gRPC. - - Lists MetadataStores for a Location. - - Returns: - Callable[[~.ListMetadataStoresRequest], - ~.ListMetadataStoresResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_metadata_stores' not in self._stubs: - self._stubs['list_metadata_stores'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/ListMetadataStores', - request_serializer=metadata_service.ListMetadataStoresRequest.serialize, - response_deserializer=metadata_service.ListMetadataStoresResponse.deserialize, - ) - return self._stubs['list_metadata_stores'] - - @property - def delete_metadata_store(self) -> Callable[ - [metadata_service.DeleteMetadataStoreRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete metadata store method over gRPC. - - Deletes a single MetadataStore and all its child - resources (Artifacts, Executions, and Contexts). 
- - Returns: - Callable[[~.DeleteMetadataStoreRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_metadata_store' not in self._stubs: - self._stubs['delete_metadata_store'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/DeleteMetadataStore', - request_serializer=metadata_service.DeleteMetadataStoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_metadata_store'] - - @property - def create_artifact(self) -> Callable[ - [metadata_service.CreateArtifactRequest], - gca_artifact.Artifact]: - r"""Return a callable for the create artifact method over gRPC. - - Creates an Artifact associated with a MetadataStore. - - Returns: - Callable[[~.CreateArtifactRequest], - ~.Artifact]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_artifact' not in self._stubs: - self._stubs['create_artifact'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/CreateArtifact', - request_serializer=metadata_service.CreateArtifactRequest.serialize, - response_deserializer=gca_artifact.Artifact.deserialize, - ) - return self._stubs['create_artifact'] - - @property - def get_artifact(self) -> Callable[ - [metadata_service.GetArtifactRequest], - artifact.Artifact]: - r"""Return a callable for the get artifact method over gRPC. - - Retrieves a specific Artifact. 
- - Returns: - Callable[[~.GetArtifactRequest], - ~.Artifact]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_artifact' not in self._stubs: - self._stubs['get_artifact'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/GetArtifact', - request_serializer=metadata_service.GetArtifactRequest.serialize, - response_deserializer=artifact.Artifact.deserialize, - ) - return self._stubs['get_artifact'] - - @property - def list_artifacts(self) -> Callable[ - [metadata_service.ListArtifactsRequest], - metadata_service.ListArtifactsResponse]: - r"""Return a callable for the list artifacts method over gRPC. - - Lists Artifacts in the MetadataStore. - - Returns: - Callable[[~.ListArtifactsRequest], - ~.ListArtifactsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_artifacts' not in self._stubs: - self._stubs['list_artifacts'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/ListArtifacts', - request_serializer=metadata_service.ListArtifactsRequest.serialize, - response_deserializer=metadata_service.ListArtifactsResponse.deserialize, - ) - return self._stubs['list_artifacts'] - - @property - def update_artifact(self) -> Callable[ - [metadata_service.UpdateArtifactRequest], - gca_artifact.Artifact]: - r"""Return a callable for the update artifact method over gRPC. - - Updates a stored Artifact. 
- - Returns: - Callable[[~.UpdateArtifactRequest], - ~.Artifact]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_artifact' not in self._stubs: - self._stubs['update_artifact'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/UpdateArtifact', - request_serializer=metadata_service.UpdateArtifactRequest.serialize, - response_deserializer=gca_artifact.Artifact.deserialize, - ) - return self._stubs['update_artifact'] - - @property - def delete_artifact(self) -> Callable[ - [metadata_service.DeleteArtifactRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete artifact method over gRPC. - - Deletes an Artifact. - - Returns: - Callable[[~.DeleteArtifactRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_artifact' not in self._stubs: - self._stubs['delete_artifact'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/DeleteArtifact', - request_serializer=metadata_service.DeleteArtifactRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_artifact'] - - @property - def purge_artifacts(self) -> Callable[ - [metadata_service.PurgeArtifactsRequest], - operations_pb2.Operation]: - r"""Return a callable for the purge artifacts method over gRPC. - - Purges Artifacts. - - Returns: - Callable[[~.PurgeArtifactsRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'purge_artifacts' not in self._stubs: - self._stubs['purge_artifacts'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/PurgeArtifacts', - request_serializer=metadata_service.PurgeArtifactsRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['purge_artifacts'] - - @property - def create_context(self) -> Callable[ - [metadata_service.CreateContextRequest], - gca_context.Context]: - r"""Return a callable for the create context method over gRPC. - - Creates a Context associated with a MetadataStore. - - Returns: - Callable[[~.CreateContextRequest], - ~.Context]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_context' not in self._stubs: - self._stubs['create_context'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/CreateContext', - request_serializer=metadata_service.CreateContextRequest.serialize, - response_deserializer=gca_context.Context.deserialize, - ) - return self._stubs['create_context'] - - @property - def get_context(self) -> Callable[ - [metadata_service.GetContextRequest], - context.Context]: - r"""Return a callable for the get context method over gRPC. - - Retrieves a specific Context. - - Returns: - Callable[[~.GetContextRequest], - ~.Context]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_context' not in self._stubs: - self._stubs['get_context'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/GetContext', - request_serializer=metadata_service.GetContextRequest.serialize, - response_deserializer=context.Context.deserialize, - ) - return self._stubs['get_context'] - - @property - def list_contexts(self) -> Callable[ - [metadata_service.ListContextsRequest], - metadata_service.ListContextsResponse]: - r"""Return a callable for the list contexts method over gRPC. - - Lists Contexts on the MetadataStore. - - Returns: - Callable[[~.ListContextsRequest], - ~.ListContextsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_contexts' not in self._stubs: - self._stubs['list_contexts'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/ListContexts', - request_serializer=metadata_service.ListContextsRequest.serialize, - response_deserializer=metadata_service.ListContextsResponse.deserialize, - ) - return self._stubs['list_contexts'] - - @property - def update_context(self) -> Callable[ - [metadata_service.UpdateContextRequest], - gca_context.Context]: - r"""Return a callable for the update context method over gRPC. - - Updates a stored Context. - - Returns: - Callable[[~.UpdateContextRequest], - ~.Context]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_context' not in self._stubs: - self._stubs['update_context'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/UpdateContext', - request_serializer=metadata_service.UpdateContextRequest.serialize, - response_deserializer=gca_context.Context.deserialize, - ) - return self._stubs['update_context'] - - @property - def delete_context(self) -> Callable[ - [metadata_service.DeleteContextRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete context method over gRPC. - - Deletes a stored Context. - - Returns: - Callable[[~.DeleteContextRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_context' not in self._stubs: - self._stubs['delete_context'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/DeleteContext', - request_serializer=metadata_service.DeleteContextRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_context'] - - @property - def purge_contexts(self) -> Callable[ - [metadata_service.PurgeContextsRequest], - operations_pb2.Operation]: - r"""Return a callable for the purge contexts method over gRPC. - - Purges Contexts. - - Returns: - Callable[[~.PurgeContextsRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'purge_contexts' not in self._stubs: - self._stubs['purge_contexts'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/PurgeContexts', - request_serializer=metadata_service.PurgeContextsRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['purge_contexts'] - - @property - def add_context_artifacts_and_executions(self) -> Callable[ - [metadata_service.AddContextArtifactsAndExecutionsRequest], - metadata_service.AddContextArtifactsAndExecutionsResponse]: - r"""Return a callable for the add context artifacts and - executions method over gRPC. - - Adds a set of Artifacts and Executions to a Context. - If any of the Artifacts or Executions have already been - added to a Context, they are simply skipped. - - Returns: - Callable[[~.AddContextArtifactsAndExecutionsRequest], - ~.AddContextArtifactsAndExecutionsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'add_context_artifacts_and_executions' not in self._stubs: - self._stubs['add_context_artifacts_and_executions'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/AddContextArtifactsAndExecutions', - request_serializer=metadata_service.AddContextArtifactsAndExecutionsRequest.serialize, - response_deserializer=metadata_service.AddContextArtifactsAndExecutionsResponse.deserialize, - ) - return self._stubs['add_context_artifacts_and_executions'] - - @property - def add_context_children(self) -> Callable[ - [metadata_service.AddContextChildrenRequest], - metadata_service.AddContextChildrenResponse]: - r"""Return a callable for the add context children method over gRPC. - - Adds a set of Contexts as children to a parent Context. 
If any - of the child Contexts have already been added to the parent - Context, they are simply skipped. If this call would create a - cycle or cause any Context to have more than 10 parents, the - request will fail with an INVALID_ARGUMENT error. - - Returns: - Callable[[~.AddContextChildrenRequest], - ~.AddContextChildrenResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'add_context_children' not in self._stubs: - self._stubs['add_context_children'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/AddContextChildren', - request_serializer=metadata_service.AddContextChildrenRequest.serialize, - response_deserializer=metadata_service.AddContextChildrenResponse.deserialize, - ) - return self._stubs['add_context_children'] - - @property - def query_context_lineage_subgraph(self) -> Callable[ - [metadata_service.QueryContextLineageSubgraphRequest], - lineage_subgraph.LineageSubgraph]: - r"""Return a callable for the query context lineage subgraph method over gRPC. - - Retrieves Artifacts and Executions within the - specified Context, connected by Event edges and returned - as a LineageSubgraph. - - Returns: - Callable[[~.QueryContextLineageSubgraphRequest], - ~.LineageSubgraph]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'query_context_lineage_subgraph' not in self._stubs: - self._stubs['query_context_lineage_subgraph'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/QueryContextLineageSubgraph', - request_serializer=metadata_service.QueryContextLineageSubgraphRequest.serialize, - response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, - ) - return self._stubs['query_context_lineage_subgraph'] - - @property - def create_execution(self) -> Callable[ - [metadata_service.CreateExecutionRequest], - gca_execution.Execution]: - r"""Return a callable for the create execution method over gRPC. - - Creates an Execution associated with a MetadataStore. - - Returns: - Callable[[~.CreateExecutionRequest], - ~.Execution]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_execution' not in self._stubs: - self._stubs['create_execution'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/CreateExecution', - request_serializer=metadata_service.CreateExecutionRequest.serialize, - response_deserializer=gca_execution.Execution.deserialize, - ) - return self._stubs['create_execution'] - - @property - def get_execution(self) -> Callable[ - [metadata_service.GetExecutionRequest], - execution.Execution]: - r"""Return a callable for the get execution method over gRPC. - - Retrieves a specific Execution. - - Returns: - Callable[[~.GetExecutionRequest], - ~.Execution]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_execution' not in self._stubs: - self._stubs['get_execution'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/GetExecution', - request_serializer=metadata_service.GetExecutionRequest.serialize, - response_deserializer=execution.Execution.deserialize, - ) - return self._stubs['get_execution'] - - @property - def list_executions(self) -> Callable[ - [metadata_service.ListExecutionsRequest], - metadata_service.ListExecutionsResponse]: - r"""Return a callable for the list executions method over gRPC. - - Lists Executions in the MetadataStore. - - Returns: - Callable[[~.ListExecutionsRequest], - ~.ListExecutionsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_executions' not in self._stubs: - self._stubs['list_executions'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/ListExecutions', - request_serializer=metadata_service.ListExecutionsRequest.serialize, - response_deserializer=metadata_service.ListExecutionsResponse.deserialize, - ) - return self._stubs['list_executions'] - - @property - def update_execution(self) -> Callable[ - [metadata_service.UpdateExecutionRequest], - gca_execution.Execution]: - r"""Return a callable for the update execution method over gRPC. - - Updates a stored Execution. - - Returns: - Callable[[~.UpdateExecutionRequest], - ~.Execution]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_execution' not in self._stubs: - self._stubs['update_execution'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/UpdateExecution', - request_serializer=metadata_service.UpdateExecutionRequest.serialize, - response_deserializer=gca_execution.Execution.deserialize, - ) - return self._stubs['update_execution'] - - @property - def delete_execution(self) -> Callable[ - [metadata_service.DeleteExecutionRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete execution method over gRPC. - - Deletes an Execution. - - Returns: - Callable[[~.DeleteExecutionRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_execution' not in self._stubs: - self._stubs['delete_execution'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/DeleteExecution', - request_serializer=metadata_service.DeleteExecutionRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_execution'] - - @property - def purge_executions(self) -> Callable[ - [metadata_service.PurgeExecutionsRequest], - operations_pb2.Operation]: - r"""Return a callable for the purge executions method over gRPC. - - Purges Executions. - - Returns: - Callable[[~.PurgeExecutionsRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'purge_executions' not in self._stubs: - self._stubs['purge_executions'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/PurgeExecutions', - request_serializer=metadata_service.PurgeExecutionsRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['purge_executions'] - - @property - def add_execution_events(self) -> Callable[ - [metadata_service.AddExecutionEventsRequest], - metadata_service.AddExecutionEventsResponse]: - r"""Return a callable for the add execution events method over gRPC. - - Adds Events to the specified Execution. An Event - indicates whether an Artifact was used as an input or - output for an Execution. If an Event already exists - between the Execution and the Artifact, the Event is - skipped. - - Returns: - Callable[[~.AddExecutionEventsRequest], - ~.AddExecutionEventsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'add_execution_events' not in self._stubs: - self._stubs['add_execution_events'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/AddExecutionEvents', - request_serializer=metadata_service.AddExecutionEventsRequest.serialize, - response_deserializer=metadata_service.AddExecutionEventsResponse.deserialize, - ) - return self._stubs['add_execution_events'] - - @property - def query_execution_inputs_and_outputs(self) -> Callable[ - [metadata_service.QueryExecutionInputsAndOutputsRequest], - lineage_subgraph.LineageSubgraph]: - r"""Return a callable for the query execution inputs and - outputs method over gRPC. - - Obtains the set of input and output Artifacts for - this Execution, in the form of LineageSubgraph that also - contains the Execution and connecting Events. 
- - Returns: - Callable[[~.QueryExecutionInputsAndOutputsRequest], - ~.LineageSubgraph]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'query_execution_inputs_and_outputs' not in self._stubs: - self._stubs['query_execution_inputs_and_outputs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/QueryExecutionInputsAndOutputs', - request_serializer=metadata_service.QueryExecutionInputsAndOutputsRequest.serialize, - response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, - ) - return self._stubs['query_execution_inputs_and_outputs'] - - @property - def create_metadata_schema(self) -> Callable[ - [metadata_service.CreateMetadataSchemaRequest], - gca_metadata_schema.MetadataSchema]: - r"""Return a callable for the create metadata schema method over gRPC. - - Creates a MetadataSchema. - - Returns: - Callable[[~.CreateMetadataSchemaRequest], - ~.MetadataSchema]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_metadata_schema' not in self._stubs: - self._stubs['create_metadata_schema'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/CreateMetadataSchema', - request_serializer=metadata_service.CreateMetadataSchemaRequest.serialize, - response_deserializer=gca_metadata_schema.MetadataSchema.deserialize, - ) - return self._stubs['create_metadata_schema'] - - @property - def get_metadata_schema(self) -> Callable[ - [metadata_service.GetMetadataSchemaRequest], - metadata_schema.MetadataSchema]: - r"""Return a callable for the get metadata schema method over gRPC. - - Retrieves a specific MetadataSchema. - - Returns: - Callable[[~.GetMetadataSchemaRequest], - ~.MetadataSchema]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_metadata_schema' not in self._stubs: - self._stubs['get_metadata_schema'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/GetMetadataSchema', - request_serializer=metadata_service.GetMetadataSchemaRequest.serialize, - response_deserializer=metadata_schema.MetadataSchema.deserialize, - ) - return self._stubs['get_metadata_schema'] - - @property - def list_metadata_schemas(self) -> Callable[ - [metadata_service.ListMetadataSchemasRequest], - metadata_service.ListMetadataSchemasResponse]: - r"""Return a callable for the list metadata schemas method over gRPC. - - Lists MetadataSchemas. - - Returns: - Callable[[~.ListMetadataSchemasRequest], - ~.ListMetadataSchemasResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_metadata_schemas' not in self._stubs: - self._stubs['list_metadata_schemas'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/ListMetadataSchemas', - request_serializer=metadata_service.ListMetadataSchemasRequest.serialize, - response_deserializer=metadata_service.ListMetadataSchemasResponse.deserialize, - ) - return self._stubs['list_metadata_schemas'] - - @property - def query_artifact_lineage_subgraph(self) -> Callable[ - [metadata_service.QueryArtifactLineageSubgraphRequest], - lineage_subgraph.LineageSubgraph]: - r"""Return a callable for the query artifact lineage - subgraph method over gRPC. - - Retrieves lineage of an Artifact represented through - Artifacts and Executions connected by Event edges and - returned as a LineageSubgraph. - - Returns: - Callable[[~.QueryArtifactLineageSubgraphRequest], - ~.LineageSubgraph]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'query_artifact_lineage_subgraph' not in self._stubs: - self._stubs['query_artifact_lineage_subgraph'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/QueryArtifactLineageSubgraph', - request_serializer=metadata_service.QueryArtifactLineageSubgraphRequest.serialize, - response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, - ) - return self._stubs['query_artifact_lineage_subgraph'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'MetadataServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py deleted file mode 100644 index e360ac1184..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,1088 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import artifact -from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact -from google.cloud.aiplatform_v1beta1.types import context -from google.cloud.aiplatform_v1beta1.types import context as gca_context -from google.cloud.aiplatform_v1beta1.types import execution -from google.cloud.aiplatform_v1beta1.types import execution as gca_execution -from google.cloud.aiplatform_v1beta1.types import lineage_subgraph -from google.cloud.aiplatform_v1beta1.types import metadata_schema -from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema -from google.cloud.aiplatform_v1beta1.types import metadata_service -from google.cloud.aiplatform_v1beta1.types import metadata_store -from google.longrunning import operations_pb2 # type: ignore -from .base import MetadataServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import MetadataServiceGrpcTransport - - -class MetadataServiceGrpcAsyncIOTransport(MetadataServiceTransport): - """gRPC AsyncIO backend transport for MetadataService. - - Service for reading and writing metadata entries. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. 
- """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
- If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. 
This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_metadata_store(self) -> Callable[ - [metadata_service.CreateMetadataStoreRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create metadata store method over gRPC. - - Initializes a MetadataStore, including allocation of - resources. - - Returns: - Callable[[~.CreateMetadataStoreRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_metadata_store' not in self._stubs: - self._stubs['create_metadata_store'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/CreateMetadataStore', - request_serializer=metadata_service.CreateMetadataStoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_metadata_store'] - - @property - def get_metadata_store(self) -> Callable[ - [metadata_service.GetMetadataStoreRequest], - Awaitable[metadata_store.MetadataStore]]: - r"""Return a callable for the get metadata store method over gRPC. - - Retrieves a specific MetadataStore. - - Returns: - Callable[[~.GetMetadataStoreRequest], - Awaitable[~.MetadataStore]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_metadata_store' not in self._stubs: - self._stubs['get_metadata_store'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/GetMetadataStore', - request_serializer=metadata_service.GetMetadataStoreRequest.serialize, - response_deserializer=metadata_store.MetadataStore.deserialize, - ) - return self._stubs['get_metadata_store'] - - @property - def list_metadata_stores(self) -> Callable[ - [metadata_service.ListMetadataStoresRequest], - Awaitable[metadata_service.ListMetadataStoresResponse]]: - r"""Return a callable for the list metadata stores method over gRPC. - - Lists MetadataStores for a Location. - - Returns: - Callable[[~.ListMetadataStoresRequest], - Awaitable[~.ListMetadataStoresResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_metadata_stores' not in self._stubs: - self._stubs['list_metadata_stores'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/ListMetadataStores', - request_serializer=metadata_service.ListMetadataStoresRequest.serialize, - response_deserializer=metadata_service.ListMetadataStoresResponse.deserialize, - ) - return self._stubs['list_metadata_stores'] - - @property - def delete_metadata_store(self) -> Callable[ - [metadata_service.DeleteMetadataStoreRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete metadata store method over gRPC. - - Deletes a single MetadataStore and all its child - resources (Artifacts, Executions, and Contexts). - - Returns: - Callable[[~.DeleteMetadataStoreRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_metadata_store' not in self._stubs: - self._stubs['delete_metadata_store'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/DeleteMetadataStore', - request_serializer=metadata_service.DeleteMetadataStoreRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_metadata_store'] - - @property - def create_artifact(self) -> Callable[ - [metadata_service.CreateArtifactRequest], - Awaitable[gca_artifact.Artifact]]: - r"""Return a callable for the create artifact method over gRPC. - - Creates an Artifact associated with a MetadataStore. - - Returns: - Callable[[~.CreateArtifactRequest], - Awaitable[~.Artifact]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_artifact' not in self._stubs: - self._stubs['create_artifact'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/CreateArtifact', - request_serializer=metadata_service.CreateArtifactRequest.serialize, - response_deserializer=gca_artifact.Artifact.deserialize, - ) - return self._stubs['create_artifact'] - - @property - def get_artifact(self) -> Callable[ - [metadata_service.GetArtifactRequest], - Awaitable[artifact.Artifact]]: - r"""Return a callable for the get artifact method over gRPC. - - Retrieves a specific Artifact. - - Returns: - Callable[[~.GetArtifactRequest], - Awaitable[~.Artifact]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_artifact' not in self._stubs: - self._stubs['get_artifact'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/GetArtifact', - request_serializer=metadata_service.GetArtifactRequest.serialize, - response_deserializer=artifact.Artifact.deserialize, - ) - return self._stubs['get_artifact'] - - @property - def list_artifacts(self) -> Callable[ - [metadata_service.ListArtifactsRequest], - Awaitable[metadata_service.ListArtifactsResponse]]: - r"""Return a callable for the list artifacts method over gRPC. - - Lists Artifacts in the MetadataStore. - - Returns: - Callable[[~.ListArtifactsRequest], - Awaitable[~.ListArtifactsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_artifacts' not in self._stubs: - self._stubs['list_artifacts'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/ListArtifacts', - request_serializer=metadata_service.ListArtifactsRequest.serialize, - response_deserializer=metadata_service.ListArtifactsResponse.deserialize, - ) - return self._stubs['list_artifacts'] - - @property - def update_artifact(self) -> Callable[ - [metadata_service.UpdateArtifactRequest], - Awaitable[gca_artifact.Artifact]]: - r"""Return a callable for the update artifact method over gRPC. - - Updates a stored Artifact. - - Returns: - Callable[[~.UpdateArtifactRequest], - Awaitable[~.Artifact]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_artifact' not in self._stubs: - self._stubs['update_artifact'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/UpdateArtifact', - request_serializer=metadata_service.UpdateArtifactRequest.serialize, - response_deserializer=gca_artifact.Artifact.deserialize, - ) - return self._stubs['update_artifact'] - - @property - def delete_artifact(self) -> Callable[ - [metadata_service.DeleteArtifactRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete artifact method over gRPC. - - Deletes an Artifact. - - Returns: - Callable[[~.DeleteArtifactRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_artifact' not in self._stubs: - self._stubs['delete_artifact'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/DeleteArtifact', - request_serializer=metadata_service.DeleteArtifactRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_artifact'] - - @property - def purge_artifacts(self) -> Callable[ - [metadata_service.PurgeArtifactsRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the purge artifacts method over gRPC. - - Purges Artifacts. - - Returns: - Callable[[~.PurgeArtifactsRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'purge_artifacts' not in self._stubs: - self._stubs['purge_artifacts'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/PurgeArtifacts', - request_serializer=metadata_service.PurgeArtifactsRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['purge_artifacts'] - - @property - def create_context(self) -> Callable[ - [metadata_service.CreateContextRequest], - Awaitable[gca_context.Context]]: - r"""Return a callable for the create context method over gRPC. - - Creates a Context associated with a MetadataStore. - - Returns: - Callable[[~.CreateContextRequest], - Awaitable[~.Context]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_context' not in self._stubs: - self._stubs['create_context'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/CreateContext', - request_serializer=metadata_service.CreateContextRequest.serialize, - response_deserializer=gca_context.Context.deserialize, - ) - return self._stubs['create_context'] - - @property - def get_context(self) -> Callable[ - [metadata_service.GetContextRequest], - Awaitable[context.Context]]: - r"""Return a callable for the get context method over gRPC. - - Retrieves a specific Context. - - Returns: - Callable[[~.GetContextRequest], - Awaitable[~.Context]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_context' not in self._stubs: - self._stubs['get_context'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/GetContext', - request_serializer=metadata_service.GetContextRequest.serialize, - response_deserializer=context.Context.deserialize, - ) - return self._stubs['get_context'] - - @property - def list_contexts(self) -> Callable[ - [metadata_service.ListContextsRequest], - Awaitable[metadata_service.ListContextsResponse]]: - r"""Return a callable for the list contexts method over gRPC. - - Lists Contexts on the MetadataStore. - - Returns: - Callable[[~.ListContextsRequest], - Awaitable[~.ListContextsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_contexts' not in self._stubs: - self._stubs['list_contexts'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/ListContexts', - request_serializer=metadata_service.ListContextsRequest.serialize, - response_deserializer=metadata_service.ListContextsResponse.deserialize, - ) - return self._stubs['list_contexts'] - - @property - def update_context(self) -> Callable[ - [metadata_service.UpdateContextRequest], - Awaitable[gca_context.Context]]: - r"""Return a callable for the update context method over gRPC. - - Updates a stored Context. - - Returns: - Callable[[~.UpdateContextRequest], - Awaitable[~.Context]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_context' not in self._stubs: - self._stubs['update_context'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/UpdateContext', - request_serializer=metadata_service.UpdateContextRequest.serialize, - response_deserializer=gca_context.Context.deserialize, - ) - return self._stubs['update_context'] - - @property - def delete_context(self) -> Callable[ - [metadata_service.DeleteContextRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete context method over gRPC. - - Deletes a stored Context. - - Returns: - Callable[[~.DeleteContextRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_context' not in self._stubs: - self._stubs['delete_context'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/DeleteContext', - request_serializer=metadata_service.DeleteContextRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_context'] - - @property - def purge_contexts(self) -> Callable[ - [metadata_service.PurgeContextsRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the purge contexts method over gRPC. - - Purges Contexts. - - Returns: - Callable[[~.PurgeContextsRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'purge_contexts' not in self._stubs: - self._stubs['purge_contexts'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/PurgeContexts', - request_serializer=metadata_service.PurgeContextsRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['purge_contexts'] - - @property - def add_context_artifacts_and_executions(self) -> Callable[ - [metadata_service.AddContextArtifactsAndExecutionsRequest], - Awaitable[metadata_service.AddContextArtifactsAndExecutionsResponse]]: - r"""Return a callable for the add context artifacts and - executions method over gRPC. - - Adds a set of Artifacts and Executions to a Context. - If any of the Artifacts or Executions have already been - added to a Context, they are simply skipped. - - Returns: - Callable[[~.AddContextArtifactsAndExecutionsRequest], - Awaitable[~.AddContextArtifactsAndExecutionsResponse]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'add_context_artifacts_and_executions' not in self._stubs: - self._stubs['add_context_artifacts_and_executions'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/AddContextArtifactsAndExecutions', - request_serializer=metadata_service.AddContextArtifactsAndExecutionsRequest.serialize, - response_deserializer=metadata_service.AddContextArtifactsAndExecutionsResponse.deserialize, - ) - return self._stubs['add_context_artifacts_and_executions'] - - @property - def add_context_children(self) -> Callable[ - [metadata_service.AddContextChildrenRequest], - Awaitable[metadata_service.AddContextChildrenResponse]]: - r"""Return a callable for the add context children method over gRPC. - - Adds a set of Contexts as children to a parent Context. If any - of the child Contexts have already been added to the parent - Context, they are simply skipped. If this call would create a - cycle or cause any Context to have more than 10 parents, the - request will fail with an INVALID_ARGUMENT error. - - Returns: - Callable[[~.AddContextChildrenRequest], - Awaitable[~.AddContextChildrenResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'add_context_children' not in self._stubs: - self._stubs['add_context_children'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/AddContextChildren', - request_serializer=metadata_service.AddContextChildrenRequest.serialize, - response_deserializer=metadata_service.AddContextChildrenResponse.deserialize, - ) - return self._stubs['add_context_children'] - - @property - def query_context_lineage_subgraph(self) -> Callable[ - [metadata_service.QueryContextLineageSubgraphRequest], - Awaitable[lineage_subgraph.LineageSubgraph]]: - r"""Return a callable for the query context lineage subgraph method over gRPC. - - Retrieves Artifacts and Executions within the - specified Context, connected by Event edges and returned - as a LineageSubgraph. - - Returns: - Callable[[~.QueryContextLineageSubgraphRequest], - Awaitable[~.LineageSubgraph]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'query_context_lineage_subgraph' not in self._stubs: - self._stubs['query_context_lineage_subgraph'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/QueryContextLineageSubgraph', - request_serializer=metadata_service.QueryContextLineageSubgraphRequest.serialize, - response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, - ) - return self._stubs['query_context_lineage_subgraph'] - - @property - def create_execution(self) -> Callable[ - [metadata_service.CreateExecutionRequest], - Awaitable[gca_execution.Execution]]: - r"""Return a callable for the create execution method over gRPC. - - Creates an Execution associated with a MetadataStore. 
- - Returns: - Callable[[~.CreateExecutionRequest], - Awaitable[~.Execution]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_execution' not in self._stubs: - self._stubs['create_execution'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/CreateExecution', - request_serializer=metadata_service.CreateExecutionRequest.serialize, - response_deserializer=gca_execution.Execution.deserialize, - ) - return self._stubs['create_execution'] - - @property - def get_execution(self) -> Callable[ - [metadata_service.GetExecutionRequest], - Awaitable[execution.Execution]]: - r"""Return a callable for the get execution method over gRPC. - - Retrieves a specific Execution. - - Returns: - Callable[[~.GetExecutionRequest], - Awaitable[~.Execution]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_execution' not in self._stubs: - self._stubs['get_execution'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/GetExecution', - request_serializer=metadata_service.GetExecutionRequest.serialize, - response_deserializer=execution.Execution.deserialize, - ) - return self._stubs['get_execution'] - - @property - def list_executions(self) -> Callable[ - [metadata_service.ListExecutionsRequest], - Awaitable[metadata_service.ListExecutionsResponse]]: - r"""Return a callable for the list executions method over gRPC. - - Lists Executions in the MetadataStore. 
- - Returns: - Callable[[~.ListExecutionsRequest], - Awaitable[~.ListExecutionsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_executions' not in self._stubs: - self._stubs['list_executions'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/ListExecutions', - request_serializer=metadata_service.ListExecutionsRequest.serialize, - response_deserializer=metadata_service.ListExecutionsResponse.deserialize, - ) - return self._stubs['list_executions'] - - @property - def update_execution(self) -> Callable[ - [metadata_service.UpdateExecutionRequest], - Awaitable[gca_execution.Execution]]: - r"""Return a callable for the update execution method over gRPC. - - Updates a stored Execution. - - Returns: - Callable[[~.UpdateExecutionRequest], - Awaitable[~.Execution]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_execution' not in self._stubs: - self._stubs['update_execution'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/UpdateExecution', - request_serializer=metadata_service.UpdateExecutionRequest.serialize, - response_deserializer=gca_execution.Execution.deserialize, - ) - return self._stubs['update_execution'] - - @property - def delete_execution(self) -> Callable[ - [metadata_service.DeleteExecutionRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete execution method over gRPC. - - Deletes an Execution. 
- - Returns: - Callable[[~.DeleteExecutionRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_execution' not in self._stubs: - self._stubs['delete_execution'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/DeleteExecution', - request_serializer=metadata_service.DeleteExecutionRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_execution'] - - @property - def purge_executions(self) -> Callable[ - [metadata_service.PurgeExecutionsRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the purge executions method over gRPC. - - Purges Executions. - - Returns: - Callable[[~.PurgeExecutionsRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'purge_executions' not in self._stubs: - self._stubs['purge_executions'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/PurgeExecutions', - request_serializer=metadata_service.PurgeExecutionsRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['purge_executions'] - - @property - def add_execution_events(self) -> Callable[ - [metadata_service.AddExecutionEventsRequest], - Awaitable[metadata_service.AddExecutionEventsResponse]]: - r"""Return a callable for the add execution events method over gRPC. - - Adds Events to the specified Execution. 
An Event - indicates whether an Artifact was used as an input or - output for an Execution. If an Event already exists - between the Execution and the Artifact, the Event is - skipped. - - Returns: - Callable[[~.AddExecutionEventsRequest], - Awaitable[~.AddExecutionEventsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'add_execution_events' not in self._stubs: - self._stubs['add_execution_events'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/AddExecutionEvents', - request_serializer=metadata_service.AddExecutionEventsRequest.serialize, - response_deserializer=metadata_service.AddExecutionEventsResponse.deserialize, - ) - return self._stubs['add_execution_events'] - - @property - def query_execution_inputs_and_outputs(self) -> Callable[ - [metadata_service.QueryExecutionInputsAndOutputsRequest], - Awaitable[lineage_subgraph.LineageSubgraph]]: - r"""Return a callable for the query execution inputs and - outputs method over gRPC. - - Obtains the set of input and output Artifacts for - this Execution, in the form of LineageSubgraph that also - contains the Execution and connecting Events. - - Returns: - Callable[[~.QueryExecutionInputsAndOutputsRequest], - Awaitable[~.LineageSubgraph]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'query_execution_inputs_and_outputs' not in self._stubs: - self._stubs['query_execution_inputs_and_outputs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/QueryExecutionInputsAndOutputs', - request_serializer=metadata_service.QueryExecutionInputsAndOutputsRequest.serialize, - response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, - ) - return self._stubs['query_execution_inputs_and_outputs'] - - @property - def create_metadata_schema(self) -> Callable[ - [metadata_service.CreateMetadataSchemaRequest], - Awaitable[gca_metadata_schema.MetadataSchema]]: - r"""Return a callable for the create metadata schema method over gRPC. - - Creates a MetadataSchema. - - Returns: - Callable[[~.CreateMetadataSchemaRequest], - Awaitable[~.MetadataSchema]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_metadata_schema' not in self._stubs: - self._stubs['create_metadata_schema'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/CreateMetadataSchema', - request_serializer=metadata_service.CreateMetadataSchemaRequest.serialize, - response_deserializer=gca_metadata_schema.MetadataSchema.deserialize, - ) - return self._stubs['create_metadata_schema'] - - @property - def get_metadata_schema(self) -> Callable[ - [metadata_service.GetMetadataSchemaRequest], - Awaitable[metadata_schema.MetadataSchema]]: - r"""Return a callable for the get metadata schema method over gRPC. - - Retrieves a specific MetadataSchema. - - Returns: - Callable[[~.GetMetadataSchemaRequest], - Awaitable[~.MetadataSchema]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_metadata_schema' not in self._stubs: - self._stubs['get_metadata_schema'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/GetMetadataSchema', - request_serializer=metadata_service.GetMetadataSchemaRequest.serialize, - response_deserializer=metadata_schema.MetadataSchema.deserialize, - ) - return self._stubs['get_metadata_schema'] - - @property - def list_metadata_schemas(self) -> Callable[ - [metadata_service.ListMetadataSchemasRequest], - Awaitable[metadata_service.ListMetadataSchemasResponse]]: - r"""Return a callable for the list metadata schemas method over gRPC. - - Lists MetadataSchemas. - - Returns: - Callable[[~.ListMetadataSchemasRequest], - Awaitable[~.ListMetadataSchemasResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_metadata_schemas' not in self._stubs: - self._stubs['list_metadata_schemas'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/ListMetadataSchemas', - request_serializer=metadata_service.ListMetadataSchemasRequest.serialize, - response_deserializer=metadata_service.ListMetadataSchemasResponse.deserialize, - ) - return self._stubs['list_metadata_schemas'] - - @property - def query_artifact_lineage_subgraph(self) -> Callable[ - [metadata_service.QueryArtifactLineageSubgraphRequest], - Awaitable[lineage_subgraph.LineageSubgraph]]: - r"""Return a callable for the query artifact lineage - subgraph method over gRPC. - - Retrieves lineage of an Artifact represented through - Artifacts and Executions connected by Event edges and - returned as a LineageSubgraph. 
- - Returns: - Callable[[~.QueryArtifactLineageSubgraphRequest], - Awaitable[~.LineageSubgraph]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'query_artifact_lineage_subgraph' not in self._stubs: - self._stubs['query_artifact_lineage_subgraph'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MetadataService/QueryArtifactLineageSubgraph', - request_serializer=metadata_service.QueryArtifactLineageSubgraphRequest.serialize, - response_deserializer=lineage_subgraph.LineageSubgraph.deserialize, - ) - return self._stubs['query_artifact_lineage_subgraph'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'MetadataServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/__init__.py deleted file mode 100644 index b32b10b1d7..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import MigrationServiceClient -from .async_client import MigrationServiceAsyncClient - -__all__ = ( - 'MigrationServiceClient', - 'MigrationServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py deleted file mode 100644 index d5b5cdc253..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/async_client.py +++ /dev/null @@ -1,383 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.migration_service import pagers -from google.cloud.aiplatform_v1beta1.types import migratable_resource -from google.cloud.aiplatform_v1beta1.types import migration_service -from .transports.base import MigrationServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import MigrationServiceGrpcAsyncIOTransport -from .client import MigrationServiceClient - - -class MigrationServiceAsyncClient: - """A service that migrates resources from automl.googleapis.com, - datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. 
- """ - - _client: MigrationServiceClient - - DEFAULT_ENDPOINT = MigrationServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = MigrationServiceClient.DEFAULT_MTLS_ENDPOINT - - annotated_dataset_path = staticmethod(MigrationServiceClient.annotated_dataset_path) - parse_annotated_dataset_path = staticmethod(MigrationServiceClient.parse_annotated_dataset_path) - dataset_path = staticmethod(MigrationServiceClient.dataset_path) - parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path) - dataset_path = staticmethod(MigrationServiceClient.dataset_path) - parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path) - dataset_path = staticmethod(MigrationServiceClient.dataset_path) - parse_dataset_path = staticmethod(MigrationServiceClient.parse_dataset_path) - model_path = staticmethod(MigrationServiceClient.model_path) - parse_model_path = staticmethod(MigrationServiceClient.parse_model_path) - model_path = staticmethod(MigrationServiceClient.model_path) - parse_model_path = staticmethod(MigrationServiceClient.parse_model_path) - version_path = staticmethod(MigrationServiceClient.version_path) - parse_version_path = staticmethod(MigrationServiceClient.parse_version_path) - common_billing_account_path = staticmethod(MigrationServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(MigrationServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(MigrationServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(MigrationServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(MigrationServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(MigrationServiceClient.parse_common_organization_path) - common_project_path = staticmethod(MigrationServiceClient.common_project_path) - parse_common_project_path = staticmethod(MigrationServiceClient.parse_common_project_path) - common_location_path = 
staticmethod(MigrationServiceClient.common_location_path) - parse_common_location_path = staticmethod(MigrationServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - MigrationServiceAsyncClient: The constructed client. - """ - return MigrationServiceClient.from_service_account_info.__func__(MigrationServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - MigrationServiceAsyncClient: The constructed client. - """ - return MigrationServiceClient.from_service_account_file.__func__(MigrationServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> MigrationServiceTransport: - """Returns the transport used by the client instance. - - Returns: - MigrationServiceTransport: The transport used by the client instance. 
- """ - return self._client.transport - - get_transport_class = functools.partial(type(MigrationServiceClient).get_transport_class, type(MigrationServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, MigrationServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the migration service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.MigrationServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. 
- - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = MigrationServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def search_migratable_resources(self, - request: Union[migration_service.SearchMigratableResourcesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.SearchMigratableResourcesAsyncPager: - r"""Searches all of the resources in - automl.googleapis.com, datalabeling.googleapis.com and - ml.googleapis.com that can be migrated to Vertex AI's - given location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest, dict]): - The request object. Request message for - [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. - parent (:class:`str`): - Required. The location that the migratable resources - should be searched from. It's the Vertex AI location - that the resources can be migrated to, not the - resources' original location. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.migration_service.pagers.SearchMigratableResourcesAsyncPager: - Response message for - [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. 
- - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = migration_service.SearchMigratableResourcesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.search_migratable_resources, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.SearchMigratableResourcesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def batch_migrate_resources(self, - request: Union[migration_service.BatchMigrateResourcesRequest, dict] = None, - *, - parent: str = None, - migrate_resource_requests: Sequence[migration_service.MigrateResourceRequest] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Batch migrates resources from ml.googleapis.com, - automl.googleapis.com, and datalabeling.googleapis.com - to Vertex AI. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesRequest, dict]): - The request object. Request message for - [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. - parent (:class:`str`): - Required. The location of the migrated resource will - live in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - migrate_resource_requests (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest]`): - Required. The request messages - specifying the resources to migrate. - They must be in the same location as the - destination. Up to 50 resources can be - migrated in one batch. - - This corresponds to the ``migrate_resource_requests`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. 
- - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesResponse` - Response message for - [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, migrate_resource_requests]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = migration_service.BatchMigrateResourcesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if migrate_resource_requests: - request.migrate_resource_requests.extend(migrate_resource_requests) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.batch_migrate_resources, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - migration_service.BatchMigrateResourcesResponse, - metadata_type=migration_service.BatchMigrateResourcesOperationMetadata, - ) - - # Done; return the response. 
- return response - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "MigrationServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/client.py deleted file mode 100644 index 33a0610379..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/client.py +++ /dev/null @@ -1,635 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.migration_service import pagers -from google.cloud.aiplatform_v1beta1.types import migratable_resource -from google.cloud.aiplatform_v1beta1.types import migration_service -from .transports.base import MigrationServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import MigrationServiceGrpcTransport -from .transports.grpc_asyncio import MigrationServiceGrpcAsyncIOTransport - - -class MigrationServiceClientMeta(type): - """Metaclass for the MigrationService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. 
- """ - _transport_registry = OrderedDict() # type: Dict[str, Type[MigrationServiceTransport]] - _transport_registry["grpc"] = MigrationServiceGrpcTransport - _transport_registry["grpc_asyncio"] = MigrationServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[MigrationServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class MigrationServiceClient(metaclass=MigrationServiceClientMeta): - """A service that migrates resources from automl.googleapis.com, - datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
- ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - MigrationServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - MigrationServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> MigrationServiceTransport: - """Returns the transport used by the client instance. - - Returns: - MigrationServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def annotated_dataset_path(project: str,dataset: str,annotated_dataset: str,) -> str: - """Returns a fully-qualified annotated_dataset string.""" - return "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}".format(project=project, dataset=dataset, annotated_dataset=annotated_dataset, ) - - @staticmethod - def parse_annotated_dataset_path(path: str) -> Dict[str,str]: - """Parses a annotated_dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)/annotatedDatasets/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def dataset_path(project: str,location: str,dataset: str,) -> str: - """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - - @staticmethod - def parse_dataset_path(path: str) -> Dict[str,str]: - """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def dataset_path(project: str,dataset: str,) -> str: - """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format(project=project, dataset=dataset, ) - - @staticmethod - def parse_dataset_path(path: str) -> Dict[str,str]: - """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def dataset_path(project: str,location: str,dataset: str,) -> str: - """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - - @staticmethod - def parse_dataset_path(path: str) -> Dict[str,str]: - """Parses a dataset path into its component segments.""" - m 
= re.match(r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_path(project: str,location: str,model: str,) -> str: - """Returns a fully-qualified model string.""" - return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - - @staticmethod - def parse_model_path(path: str) -> Dict[str,str]: - """Parses a model path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_path(project: str,location: str,model: str,) -> str: - """Returns a fully-qualified model string.""" - return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - - @staticmethod - def parse_model_path(path: str) -> Dict[str,str]: - """Parses a model path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def version_path(project: str,model: str,version: str,) -> str: - """Returns a fully-qualified version string.""" - return "projects/{project}/models/{model}/versions/{version}".format(project=project, model=model, version=version, ) - - @staticmethod - def parse_version_path(path: str) -> Dict[str,str]: - """Parses a version path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/models/(?P.+?)/versions/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = 
re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, MigrationServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: 
gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the migration service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, MigrationServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. 
- """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, MigrationServiceTransport): - # transport is a MigrationServiceTransport instance. 
- if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def search_migratable_resources(self, - request: Union[migration_service.SearchMigratableResourcesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.SearchMigratableResourcesPager: - r"""Searches all of the resources in - automl.googleapis.com, datalabeling.googleapis.com and - ml.googleapis.com that can be migrated to Vertex AI's - given location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest, dict]): - The request object. Request message for - [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. - parent (str): - Required. The location that the migratable resources - should be searched from. It's the Vertex AI location - that the resources can be migrated to, not the - resources' original location. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.migration_service.pagers.SearchMigratableResourcesPager: - Response message for - [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a migration_service.SearchMigratableResourcesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, migration_service.SearchMigratableResourcesRequest): - request = migration_service.SearchMigratableResourcesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.search_migratable_resources] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. 
- response = pagers.SearchMigratableResourcesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def batch_migrate_resources(self, - request: Union[migration_service.BatchMigrateResourcesRequest, dict] = None, - *, - parent: str = None, - migrate_resource_requests: Sequence[migration_service.MigrateResourceRequest] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Batch migrates resources from ml.googleapis.com, - automl.googleapis.com, and datalabeling.googleapis.com - to Vertex AI. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesRequest, dict]): - The request object. Request message for - [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. - parent (str): - Required. The location of the migrated resource will - live in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - migrate_resource_requests (Sequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest]): - Required. The request messages - specifying the resources to migrate. - They must be in the same location as the - destination. Up to 50 resources can be - migrated in one batch. - - This corresponds to the ``migrate_resource_requests`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesResponse` - Response message for - [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, migrate_resource_requests]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a migration_service.BatchMigrateResourcesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, migration_service.BatchMigrateResourcesRequest): - request = migration_service.BatchMigrateResourcesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if migrate_resource_requests is not None: - request.migrate_resource_requests = migrate_resource_requests - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.batch_migrate_resources] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
- response = operation.from_gapic( - response, - self._transport.operations_client, - migration_service.BatchMigrateResourcesResponse, - metadata_type=migration_service.BatchMigrateResourcesOperationMetadata, - ) - - # Done; return the response. - return response - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! - """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "MigrationServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/pagers.py deleted file mode 100644 index 3d48f15ba4..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/pagers.py +++ /dev/null @@ -1,141 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.aiplatform_v1beta1.types import migratable_resource -from google.cloud.aiplatform_v1beta1.types import migration_service - - -class SearchMigratableResourcesPager: - """A pager for iterating through ``search_migratable_resources`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``migratable_resources`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``SearchMigratableResources`` requests and continue to iterate - through the ``migratable_resources`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., migration_service.SearchMigratableResourcesResponse], - request: migration_service.SearchMigratableResourcesRequest, - response: migration_service.SearchMigratableResourcesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = migration_service.SearchMigratableResourcesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[migration_service.SearchMigratableResourcesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[migratable_resource.MigratableResource]: - for page in self.pages: - yield from page.migratable_resources - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class SearchMigratableResourcesAsyncPager: - """A pager for iterating through ``search_migratable_resources`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``migratable_resources`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``SearchMigratableResources`` requests and continue to iterate - through the ``migratable_resources`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[migration_service.SearchMigratableResourcesResponse]], - request: migration_service.SearchMigratableResourcesRequest, - response: migration_service.SearchMigratableResourcesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. 
- - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = migration_service.SearchMigratableResourcesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[migration_service.SearchMigratableResourcesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[migratable_resource.MigratableResource]: - async def async_generator(): - async for page in self.pages: - for response in page.migratable_resources: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/__init__.py deleted file mode 100644 index 8f036c410e..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import MigrationServiceTransport -from .grpc import MigrationServiceGrpcTransport -from .grpc_asyncio import MigrationServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[MigrationServiceTransport]] -_transport_registry['grpc'] = MigrationServiceGrpcTransport -_transport_registry['grpc_asyncio'] = MigrationServiceGrpcAsyncIOTransport - -__all__ = ( - 'MigrationServiceTransport', - 'MigrationServiceGrpcTransport', - 'MigrationServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py deleted file mode 100644 index 6ef2730d03..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py +++ /dev/null @@ -1,167 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import migration_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class MigrationServiceTransport(abc.ABC): - """Abstract transport class for MigrationService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
- self._wrapped_methods = { - self.search_migratable_resources: gapic_v1.method.wrap_method( - self.search_migratable_resources, - default_timeout=None, - client_info=client_info, - ), - self.batch_migrate_resources: gapic_v1.method.wrap_method( - self.batch_migrate_resources, - default_timeout=None, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! - """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def search_migratable_resources(self) -> Callable[ - [migration_service.SearchMigratableResourcesRequest], - Union[ - migration_service.SearchMigratableResourcesResponse, - Awaitable[migration_service.SearchMigratableResourcesResponse] - ]]: - raise NotImplementedError() - - @property - def batch_migrate_resources(self) -> Callable[ - [migration_service.BatchMigrateResourcesRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'MigrationServiceTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py deleted file mode 100644 index 8c606d60de..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc.py +++ /dev/null @@ -1,305 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1beta1.types import migration_service -from google.longrunning import operations_pb2 # type: ignore -from .base import MigrationServiceTransport, DEFAULT_CLIENT_INFO - - -class MigrationServiceGrpcTransport(MigrationServiceTransport): - """gRPC backend transport for MigrationService. - - A service that migrates resources from automl.googleapis.com, - datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. 
- ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def search_migratable_resources(self) -> Callable[ - [migration_service.SearchMigratableResourcesRequest], - migration_service.SearchMigratableResourcesResponse]: - r"""Return a callable for the search migratable resources method over gRPC. 
- - Searches all of the resources in - automl.googleapis.com, datalabeling.googleapis.com and - ml.googleapis.com that can be migrated to Vertex AI's - given location. - - Returns: - Callable[[~.SearchMigratableResourcesRequest], - ~.SearchMigratableResourcesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'search_migratable_resources' not in self._stubs: - self._stubs['search_migratable_resources'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MigrationService/SearchMigratableResources', - request_serializer=migration_service.SearchMigratableResourcesRequest.serialize, - response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize, - ) - return self._stubs['search_migratable_resources'] - - @property - def batch_migrate_resources(self) -> Callable[ - [migration_service.BatchMigrateResourcesRequest], - operations_pb2.Operation]: - r"""Return a callable for the batch migrate resources method over gRPC. - - Batch migrates resources from ml.googleapis.com, - automl.googleapis.com, and datalabeling.googleapis.com - to Vertex AI. - - Returns: - Callable[[~.BatchMigrateResourcesRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'batch_migrate_resources' not in self._stubs: - self._stubs['batch_migrate_resources'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MigrationService/BatchMigrateResources', - request_serializer=migration_service.BatchMigrateResourcesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['batch_migrate_resources'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'MigrationServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py deleted file mode 100644 index 59c601b8d4..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,309 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import migration_service -from google.longrunning import operations_pb2 # type: ignore -from .base import MigrationServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import MigrationServiceGrpcTransport - - -class MigrationServiceGrpcAsyncIOTransport(MigrationServiceTransport): - """gRPC AsyncIO backend transport for MigrationService. - - A service that migrates resources from automl.googleapis.com, - datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. 
- google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - 
("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def search_migratable_resources(self) -> Callable[ - [migration_service.SearchMigratableResourcesRequest], - Awaitable[migration_service.SearchMigratableResourcesResponse]]: - r"""Return a callable for the search migratable resources method over gRPC. - - Searches all of the resources in - automl.googleapis.com, datalabeling.googleapis.com and - ml.googleapis.com that can be migrated to Vertex AI's - given location. - - Returns: - Callable[[~.SearchMigratableResourcesRequest], - Awaitable[~.SearchMigratableResourcesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'search_migratable_resources' not in self._stubs: - self._stubs['search_migratable_resources'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MigrationService/SearchMigratableResources', - request_serializer=migration_service.SearchMigratableResourcesRequest.serialize, - response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize, - ) - return self._stubs['search_migratable_resources'] - - @property - def batch_migrate_resources(self) -> Callable[ - [migration_service.BatchMigrateResourcesRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the batch migrate resources method over gRPC. - - Batch migrates resources from ml.googleapis.com, - automl.googleapis.com, and datalabeling.googleapis.com - to Vertex AI. - - Returns: - Callable[[~.BatchMigrateResourcesRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'batch_migrate_resources' not in self._stubs: - self._stubs['batch_migrate_resources'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.MigrationService/BatchMigrateResources', - request_serializer=migration_service.BatchMigrateResourcesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['batch_migrate_resources'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'MigrationServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/__init__.py deleted file mode 100644 index 5c4d570d15..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import ModelServiceClient -from .async_client import ModelServiceAsyncClient - -__all__ = ( - 'ModelServiceClient', - 'ModelServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py deleted file mode 100644 index 8c40918cc2..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/async_client.py +++ /dev/null @@ -1,1072 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.model_service import pagers -from google.cloud.aiplatform_v1beta1.types import deployed_model_ref -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import explanation -from google.cloud.aiplatform_v1beta1.types import model -from google.cloud.aiplatform_v1beta1.types import model as gca_model -from google.cloud.aiplatform_v1beta1.types import model_evaluation -from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice -from google.cloud.aiplatform_v1beta1.types import model_service -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import ModelServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport -from .client import ModelServiceClient - - -class ModelServiceAsyncClient: - """A service for managing Vertex AI's machine learning Models.""" - - 
_client: ModelServiceClient - - DEFAULT_ENDPOINT = ModelServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = ModelServiceClient.DEFAULT_MTLS_ENDPOINT - - endpoint_path = staticmethod(ModelServiceClient.endpoint_path) - parse_endpoint_path = staticmethod(ModelServiceClient.parse_endpoint_path) - model_path = staticmethod(ModelServiceClient.model_path) - parse_model_path = staticmethod(ModelServiceClient.parse_model_path) - model_evaluation_path = staticmethod(ModelServiceClient.model_evaluation_path) - parse_model_evaluation_path = staticmethod(ModelServiceClient.parse_model_evaluation_path) - model_evaluation_slice_path = staticmethod(ModelServiceClient.model_evaluation_slice_path) - parse_model_evaluation_slice_path = staticmethod(ModelServiceClient.parse_model_evaluation_slice_path) - training_pipeline_path = staticmethod(ModelServiceClient.training_pipeline_path) - parse_training_pipeline_path = staticmethod(ModelServiceClient.parse_training_pipeline_path) - common_billing_account_path = staticmethod(ModelServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(ModelServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(ModelServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(ModelServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(ModelServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(ModelServiceClient.parse_common_organization_path) - common_project_path = staticmethod(ModelServiceClient.common_project_path) - parse_common_project_path = staticmethod(ModelServiceClient.parse_common_project_path) - common_location_path = staticmethod(ModelServiceClient.common_location_path) - parse_common_location_path = staticmethod(ModelServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the 
provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ModelServiceAsyncClient: The constructed client. - """ - return ModelServiceClient.from_service_account_info.__func__(ModelServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ModelServiceAsyncClient: The constructed client. - """ - return ModelServiceClient.from_service_account_file.__func__(ModelServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> ModelServiceTransport: - """Returns the transport used by the client instance. - - Returns: - ModelServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(ModelServiceClient).get_transport_class, type(ModelServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, ModelServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the model service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. 
- transport (Union[str, ~.ModelServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = ModelServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def upload_model(self, - request: Union[model_service.UploadModelRequest, dict] = None, - *, - parent: str = None, - model: gca_model.Model = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Uploads a Model artifact into Vertex AI. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UploadModelRequest, dict]): - The request object. Request message for - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel]. 
- parent (:class:`str`): - Required. The resource name of the Location into which - to upload the Model. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - model (:class:`google.cloud.aiplatform_v1beta1.types.Model`): - Required. The Model to create. - This corresponds to the ``model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.UploadModelResponse` - Response message of - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] - operation. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, model]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.UploadModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if model is not None: - request.model = model - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.upload_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - model_service.UploadModelResponse, - metadata_type=model_service.UploadModelOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_model(self, - request: Union[model_service.GetModelRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model.Model: - r"""Gets a Model. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetModelRequest, dict]): - The request object. Request message for - [ModelService.GetModel][google.cloud.aiplatform.v1beta1.ModelService.GetModel]. - name (:class:`str`): - Required. The name of the Model resource. Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Model: - A trained machine learning Model. - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.GetModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_models(self, - request: Union[model_service.ListModelsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelsAsyncPager: - r"""Lists Models in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListModelsRequest, dict]): - The request object. Request message for - [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels]. - parent (:class:`str`): - Required. The resource name of the Location to list the - Models from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelsAsyncPager: - Response message for - [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.ListModelsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_models, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListModelsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def update_model(self, - request: Union[model_service.UpdateModelRequest, dict] = None, - *, - model: gca_model.Model = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_model.Model: - r"""Updates a Model. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateModelRequest, dict]): - The request object. Request message for - [ModelService.UpdateModel][google.cloud.aiplatform.v1beta1.ModelService.UpdateModel]. - model (:class:`google.cloud.aiplatform_v1beta1.types.Model`): - Required. The Model which replaces - the resource on the server. - - This corresponds to the ``model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The update mask applies to the resource. For - the ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Model: - A trained machine learning Model. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([model, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.UpdateModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if model is not None: - request.model = model - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("model.name", request.model.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_model(self, - request: Union[model_service.DeleteModelRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a Model. - - A model cannot be deleted if any - [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] resource - has a - [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] - based on the model in its - [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] - field. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteModelRequest, dict]): - The request object. Request message for - [ModelService.DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel]. - name (:class:`str`): - Required. 
The name of the Model resource to be deleted. - Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.DeleteModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def export_model(self, - request: Union[model_service.ExportModelRequest, dict] = None, - *, - name: str = None, - output_config: model_service.ExportModelRequest.OutputConfig = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Exports a trained, exportable Model to a location specified by - the user. A Model is considered to be exportable if it has at - least one [supported export - format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats]. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ExportModelRequest, dict]): - The request object. Request message for - [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel]. - name (:class:`str`): - Required. The resource name of the - Model to export. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - output_config (:class:`google.cloud.aiplatform_v1beta1.types.ExportModelRequest.OutputConfig`): - Required. The desired output location - and configuration. - - This corresponds to the ``output_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.ExportModelResponse` - Response message of - [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel] - operation. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, output_config]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.ExportModelRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if output_config is not None: - request.output_config = output_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.export_model, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - model_service.ExportModelResponse, - metadata_type=model_service.ExportModelOperationMetadata, - ) - - # Done; return the response. 
- return response - - async def get_model_evaluation(self, - request: Union[model_service.GetModelEvaluationRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model_evaluation.ModelEvaluation: - r"""Gets a ModelEvaluation. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetModelEvaluationRequest, dict]): - The request object. Request message for - [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation]. - name (:class:`str`): - Required. The name of the ModelEvaluation resource. - Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.ModelEvaluation: - A collection of metrics calculated by - comparing Model's predictions on all of - the test data against annotations from - the test data. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.GetModelEvaluationRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_model_evaluation, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_model_evaluations(self, - request: Union[model_service.ListModelEvaluationsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelEvaluationsAsyncPager: - r"""Lists ModelEvaluations in a Model. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest, dict]): - The request object. Request message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. - parent (:class:`str`): - Required. The resource name of the Model to list the - ModelEvaluations from. Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationsAsyncPager: - Response message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.ListModelEvaluationsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_model_evaluations, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListModelEvaluationsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def get_model_evaluation_slice(self, - request: Union[model_service.GetModelEvaluationSliceRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model_evaluation_slice.ModelEvaluationSlice: - r"""Gets a ModelEvaluationSlice. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetModelEvaluationSliceRequest, dict]): - The request object. Request message for - [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice]. - name (:class:`str`): - Required. The name of the ModelEvaluationSlice resource. - Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice: - A collection of metrics calculated by - comparing Model's predictions on a slice - of the test data against ground truth - annotations. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.GetModelEvaluationSliceRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_model_evaluation_slice, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_model_evaluation_slices(self, - request: Union[model_service.ListModelEvaluationSlicesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelEvaluationSlicesAsyncPager: - r"""Lists ModelEvaluationSlices in a ModelEvaluation. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest, dict]): - The request object. Request message for - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. - parent (:class:`str`): - Required. The resource name of the ModelEvaluation to - list the ModelEvaluationSlices from. Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationSlicesAsyncPager: - Response message for - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = model_service.ListModelEvaluationSlicesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_model_evaluation_slices, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListModelEvaluationSlicesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "ModelServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/client.py deleted file mode 100644 index b1720ea2a4..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/client.py +++ /dev/null @@ -1,1306 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.model_service import pagers -from google.cloud.aiplatform_v1beta1.types import deployed_model_ref -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import explanation -from google.cloud.aiplatform_v1beta1.types import model -from google.cloud.aiplatform_v1beta1.types import model as gca_model -from google.cloud.aiplatform_v1beta1.types import model_evaluation -from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice -from google.cloud.aiplatform_v1beta1.types import model_service -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import ModelServiceTransport, DEFAULT_CLIENT_INFO -from 
.transports.grpc import ModelServiceGrpcTransport -from .transports.grpc_asyncio import ModelServiceGrpcAsyncIOTransport - - -class ModelServiceClientMeta(type): - """Metaclass for the ModelService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[ModelServiceTransport]] - _transport_registry["grpc"] = ModelServiceGrpcTransport - _transport_registry["grpc_asyncio"] = ModelServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[ModelServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class ModelServiceClient(metaclass=ModelServiceClientMeta): - """A service for managing Vertex AI's machine learning Models.""" - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
- ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ModelServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ModelServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> ModelServiceTransport: - """Returns the transport used by the client instance. - - Returns: - ModelServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def endpoint_path(project: str,location: str,endpoint: str,) -> str: - """Returns a fully-qualified endpoint string.""" - return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - - @staticmethod - def parse_endpoint_path(path: str) -> Dict[str,str]: - """Parses a endpoint path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/endpoints/(?P<endpoint>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_path(project: str,location: str,model: str,) -> str: - """Returns a fully-qualified model string.""" - return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - - @staticmethod - def parse_model_path(path: str) -> Dict[str,str]: - """Parses a model path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_evaluation_path(project: str,location: str,model: str,evaluation: str,) -> str: - """Returns a fully-qualified model_evaluation string.""" - return "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}".format(project=project, location=location, model=model, evaluation=evaluation, ) - - @staticmethod - def parse_model_evaluation_path(path: str) -> Dict[str,str]: - """Parses a model_evaluation path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)/evaluations/(?P<evaluation>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_evaluation_slice_path(project: str,location: str,model: str,evaluation: str,slice: str,) -> str: - """Returns a fully-qualified model_evaluation_slice string.""" - return "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}".format(project=project, 
location=location, model=model, evaluation=evaluation, slice=slice, ) - - @staticmethod - def parse_model_evaluation_slice_path(path: str) -> Dict[str,str]: - """Parses a model_evaluation_slice path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)/evaluations/(?P<evaluation>.+?)/slices/(?P<slice>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def training_pipeline_path(project: str,location: str,training_pipeline: str,) -> str: - """Returns a fully-qualified training_pipeline string.""" - return "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, ) - - @staticmethod - def parse_training_pipeline_path(path: str) -> Dict[str,str]: - """Parses a training_pipeline path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/trainingPipelines/(?P<training_pipeline>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P<folder>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return 
"organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P<organization>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, ModelServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the model service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ModelServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. 
- client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. 
- if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, ModelServiceTransport): - # transport is a ModelServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def upload_model(self, - request: Union[model_service.UploadModelRequest, dict] = None, - *, - parent: str = None, - model: gca_model.Model = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Uploads a Model artifact into Vertex AI. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UploadModelRequest, dict]): - The request object. Request message for - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel]. - parent (str): - Required. The resource name of the Location into which - to upload the Model. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - model (google.cloud.aiplatform_v1beta1.types.Model): - Required. The Model to create. - This corresponds to the ``model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
- - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.UploadModelResponse` - Response message of - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] - operation. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, model]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.UploadModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.UploadModelRequest): - request = model_service.UploadModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if model is not None: - request.model = model - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.upload_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - model_service.UploadModelResponse, - metadata_type=model_service.UploadModelOperationMetadata, - ) - - # Done; return the response. 
- return response - - def get_model(self, - request: Union[model_service.GetModelRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model.Model: - r"""Gets a Model. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetModelRequest, dict]): - The request object. Request message for - [ModelService.GetModel][google.cloud.aiplatform.v1beta1.ModelService.GetModel]. - name (str): - Required. The name of the Model resource. Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Model: - A trained machine learning Model. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.GetModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.GetModelRequest): - request = model_service.GetModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_models(self, - request: Union[model_service.ListModelsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelsPager: - r"""Lists Models in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListModelsRequest, dict]): - The request object. Request message for - [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels]. - parent (str): - Required. The resource name of the Location to list the - Models from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelsPager: - Response message for - [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.ListModelsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.ListModelsRequest): - request = model_service.ListModelsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_models] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListModelsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_model(self, - request: Union[model_service.UpdateModelRequest, dict] = None, - *, - model: gca_model.Model = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_model.Model: - r"""Updates a Model. 
- - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateModelRequest, dict]): - The request object. Request message for - [ModelService.UpdateModel][google.cloud.aiplatform.v1beta1.ModelService.UpdateModel]. - model (google.cloud.aiplatform_v1beta1.types.Model): - Required. The Model which replaces - the resource on the server. - - This corresponds to the ``model`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. For - the ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Model: - A trained machine learning Model. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([model, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.UpdateModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.UpdateModelRequest): - request = model_service.UpdateModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if model is not None: - request.model = model - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("model.name", request.model.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_model(self, - request: Union[model_service.DeleteModelRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a Model. - - A model cannot be deleted if any - [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] resource - has a - [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] - based on the model in its - [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] - field. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteModelRequest, dict]): - The request object. Request message for - [ModelService.DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel]. - name (str): - Required. The name of the Model resource to be deleted. - Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.DeleteModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.DeleteModelRequest): - request = model_service.DeleteModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
- response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def export_model(self, - request: Union[model_service.ExportModelRequest, dict] = None, - *, - name: str = None, - output_config: model_service.ExportModelRequest.OutputConfig = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Exports a trained, exportable Model to a location specified by - the user. A Model is considered to be exportable if it has at - least one [supported export - format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats]. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ExportModelRequest, dict]): - The request object. Request message for - [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel]. - name (str): - Required. The resource name of the - Model to export. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - output_config (google.cloud.aiplatform_v1beta1.types.ExportModelRequest.OutputConfig): - Required. The desired output location - and configuration. - - This corresponds to the ``output_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
- - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.ExportModelResponse` - Response message of - [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel] - operation. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, output_config]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.ExportModelRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.ExportModelRequest): - request = model_service.ExportModelRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if output_config is not None: - request.output_config = output_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.export_model] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - model_service.ExportModelResponse, - metadata_type=model_service.ExportModelOperationMetadata, - ) - - # Done; return the response. 
- return response - - def get_model_evaluation(self, - request: Union[model_service.GetModelEvaluationRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model_evaluation.ModelEvaluation: - r"""Gets a ModelEvaluation. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetModelEvaluationRequest, dict]): - The request object. Request message for - [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation]. - name (str): - Required. The name of the ModelEvaluation resource. - Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.ModelEvaluation: - A collection of metrics calculated by - comparing Model's predictions on all of - the test data against annotations from - the test data. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.GetModelEvaluationRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, model_service.GetModelEvaluationRequest): - request = model_service.GetModelEvaluationRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_model_evaluation] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_model_evaluations(self, - request: Union[model_service.ListModelEvaluationsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelEvaluationsPager: - r"""Lists ModelEvaluations in a Model. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest, dict]): - The request object. Request message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. - parent (str): - Required. The resource name of the Model to list the - ModelEvaluations from. Format: - ``projects/{project}/locations/{location}/models/{model}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationsPager: - Response message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.ListModelEvaluationsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.ListModelEvaluationsRequest): - request = model_service.ListModelEvaluationsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_model_evaluations] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListModelEvaluationsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def get_model_evaluation_slice(self, - request: Union[model_service.GetModelEvaluationSliceRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> model_evaluation_slice.ModelEvaluationSlice: - r"""Gets a ModelEvaluationSlice. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetModelEvaluationSliceRequest, dict]): - The request object. Request message for - [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice]. - name (str): - Required. The name of the ModelEvaluationSlice resource. - Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice: - A collection of metrics calculated by - comparing Model's predictions on a slice - of the test data against ground truth - annotations. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.GetModelEvaluationSliceRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.GetModelEvaluationSliceRequest): - request = model_service.GetModelEvaluationSliceRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_model_evaluation_slice] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_model_evaluation_slices(self, - request: Union[model_service.ListModelEvaluationSlicesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListModelEvaluationSlicesPager: - r"""Lists ModelEvaluationSlices in a ModelEvaluation. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest, dict]): - The request object. Request message for - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. - parent (str): - Required. The resource name of the ModelEvaluation to - list the ModelEvaluationSlices from. Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationSlicesPager: - Response message for - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a model_service.ListModelEvaluationSlicesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, model_service.ListModelEvaluationSlicesRequest): - request = model_service.ListModelEvaluationSlicesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_model_evaluation_slices] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListModelEvaluationSlicesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! - """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "ModelServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py deleted file mode 100644 index b41cf2a2bc..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/pagers.py +++ /dev/null @@ -1,387 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.aiplatform_v1beta1.types import model -from google.cloud.aiplatform_v1beta1.types import model_evaluation -from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice -from google.cloud.aiplatform_v1beta1.types import model_service - - -class ListModelsPager: - """A pager for iterating through ``list_models`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListModelsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``models`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListModels`` requests and continue to iterate - through the ``models`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., model_service.ListModelsResponse], - request: model_service.ListModelsRequest, - response: model_service.ListModelsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListModelsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListModelsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = model_service.ListModelsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[model_service.ListModelsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[model.Model]: - for page in self.pages: - yield from page.models - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelsAsyncPager: - """A pager for iterating through ``list_models`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListModelsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``models`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListModels`` requests and continue to iterate - through the ``models`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[model_service.ListModelsResponse]], - request: model_service.ListModelsRequest, - response: model_service.ListModelsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListModelsRequest): - The initial request object. 
- response (google.cloud.aiplatform_v1beta1.types.ListModelsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = model_service.ListModelsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[model_service.ListModelsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[model.Model]: - async def async_generator(): - async for page in self.pages: - for response in page.models: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelEvaluationsPager: - """A pager for iterating through ``list_model_evaluations`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``model_evaluations`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListModelEvaluations`` requests and continue to iterate - through the ``model_evaluations`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., model_service.ListModelEvaluationsResponse], - request: model_service.ListModelEvaluationsRequest, - response: model_service.ListModelEvaluationsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = model_service.ListModelEvaluationsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[model_service.ListModelEvaluationsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[model_evaluation.ModelEvaluation]: - for page in self.pages: - yield from page.model_evaluations - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelEvaluationsAsyncPager: - """A pager for iterating through ``list_model_evaluations`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``model_evaluations`` field. 
- - If there are more pages, the ``__aiter__`` method will make additional - ``ListModelEvaluations`` requests and continue to iterate - through the ``model_evaluations`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[model_service.ListModelEvaluationsResponse]], - request: model_service.ListModelEvaluationsRequest, - response: model_service.ListModelEvaluationsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = model_service.ListModelEvaluationsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[model_service.ListModelEvaluationsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[model_evaluation.ModelEvaluation]: - async def async_generator(): - async for page in self.pages: - for response in page.model_evaluations: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelEvaluationSlicesPager: - """A pager for iterating through ``list_model_evaluation_slices`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``model_evaluation_slices`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListModelEvaluationSlices`` requests and continue to iterate - through the ``model_evaluation_slices`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., model_service.ListModelEvaluationSlicesResponse], - request: model_service.ListModelEvaluationSlicesRequest, - response: model_service.ListModelEvaluationSlicesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = model_service.ListModelEvaluationSlicesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[model_service.ListModelEvaluationSlicesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[model_evaluation_slice.ModelEvaluationSlice]: - for page in self.pages: - yield from page.model_evaluation_slices - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListModelEvaluationSlicesAsyncPager: - """A pager for iterating through ``list_model_evaluation_slices`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``model_evaluation_slices`` field. 
- - If there are more pages, the ``__aiter__`` method will make additional - ``ListModelEvaluationSlices`` requests and continue to iterate - through the ``model_evaluation_slices`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[model_service.ListModelEvaluationSlicesResponse]], - request: model_service.ListModelEvaluationSlicesRequest, - response: model_service.ListModelEvaluationSlicesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = model_service.ListModelEvaluationSlicesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[model_service.ListModelEvaluationSlicesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[model_evaluation_slice.ModelEvaluationSlice]: - async def async_generator(): - async for page in self.pages: - for response in page.model_evaluation_slices: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/__init__.py deleted file mode 100644 index 0f09224d3c..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -from typing import Dict, Type - -from .base import ModelServiceTransport -from .grpc import ModelServiceGrpcTransport -from .grpc_asyncio import ModelServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[ModelServiceTransport]] -_transport_registry['grpc'] = ModelServiceGrpcTransport -_transport_registry['grpc_asyncio'] = ModelServiceGrpcAsyncIOTransport - -__all__ = ( - 'ModelServiceTransport', - 'ModelServiceGrpcTransport', - 'ModelServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py deleted file mode 100644 index 07d2df5c3e..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/base.py +++ /dev/null @@ -1,283 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import model -from google.cloud.aiplatform_v1beta1.types import model as gca_model -from google.cloud.aiplatform_v1beta1.types import model_evaluation -from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice -from google.cloud.aiplatform_v1beta1.types import model_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class ModelServiceTransport(abc.ABC): - """Abstract transport class for ModelService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. 
- if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.upload_model: gapic_v1.method.wrap_method( - self.upload_model, - default_timeout=5.0, - client_info=client_info, - ), - self.get_model: gapic_v1.method.wrap_method( - self.get_model, - default_timeout=5.0, - client_info=client_info, - ), - self.list_models: gapic_v1.method.wrap_method( - self.list_models, - default_timeout=5.0, - client_info=client_info, - ), - self.update_model: gapic_v1.method.wrap_method( - self.update_model, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_model: gapic_v1.method.wrap_method( - self.delete_model, - default_timeout=5.0, - client_info=client_info, - ), - self.export_model: gapic_v1.method.wrap_method( - self.export_model, - default_timeout=5.0, - client_info=client_info, - ), - self.get_model_evaluation: gapic_v1.method.wrap_method( - self.get_model_evaluation, - default_timeout=5.0, - client_info=client_info, - ), - self.list_model_evaluations: gapic_v1.method.wrap_method( - self.list_model_evaluations, - default_timeout=5.0, - client_info=client_info, - ), - self.get_model_evaluation_slice: gapic_v1.method.wrap_method( - self.get_model_evaluation_slice, - default_timeout=5.0, - client_info=client_info, - ), - self.list_model_evaluation_slices: gapic_v1.method.wrap_method( - self.list_model_evaluation_slices, - default_timeout=5.0, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! 
- """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def upload_model(self) -> Callable[ - [model_service.UploadModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_model(self) -> Callable[ - [model_service.GetModelRequest], - Union[ - model.Model, - Awaitable[model.Model] - ]]: - raise NotImplementedError() - - @property - def list_models(self) -> Callable[ - [model_service.ListModelsRequest], - Union[ - model_service.ListModelsResponse, - Awaitable[model_service.ListModelsResponse] - ]]: - raise NotImplementedError() - - @property - def update_model(self) -> Callable[ - [model_service.UpdateModelRequest], - Union[ - gca_model.Model, - Awaitable[gca_model.Model] - ]]: - raise NotImplementedError() - - @property - def delete_model(self) -> Callable[ - [model_service.DeleteModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def export_model(self) -> Callable[ - [model_service.ExportModelRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_model_evaluation(self) -> Callable[ - [model_service.GetModelEvaluationRequest], - Union[ - model_evaluation.ModelEvaluation, - Awaitable[model_evaluation.ModelEvaluation] - ]]: - raise NotImplementedError() - - @property - def list_model_evaluations(self) -> Callable[ - [model_service.ListModelEvaluationsRequest], - Union[ - model_service.ListModelEvaluationsResponse, - Awaitable[model_service.ListModelEvaluationsResponse] - ]]: - raise NotImplementedError() - - @property - def get_model_evaluation_slice(self) -> Callable[ - [model_service.GetModelEvaluationSliceRequest], - Union[ - 
model_evaluation_slice.ModelEvaluationSlice, - Awaitable[model_evaluation_slice.ModelEvaluationSlice] - ]]: - raise NotImplementedError() - - @property - def list_model_evaluation_slices(self) -> Callable[ - [model_service.ListModelEvaluationSlicesRequest], - Union[ - model_service.ListModelEvaluationSlicesResponse, - Awaitable[model_service.ListModelEvaluationSlicesResponse] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'ModelServiceTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py deleted file mode 100644 index 39f9a9c6ad..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc.py +++ /dev/null @@ -1,522 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1beta1.types import model -from google.cloud.aiplatform_v1beta1.types import model as gca_model -from google.cloud.aiplatform_v1beta1.types import model_evaluation -from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice -from google.cloud.aiplatform_v1beta1.types import model_service -from google.longrunning import operations_pb2 # type: ignore -from .base import ModelServiceTransport, DEFAULT_CLIENT_INFO - - -class ModelServiceGrpcTransport(ModelServiceTransport): - """gRPC backend transport for ModelService. - - A service for managing Vertex AI's machine learning Models. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. 
- ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def upload_model(self) -> Callable[ - [model_service.UploadModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the upload model method over gRPC. - - Uploads a Model artifact into Vertex AI. 
- - Returns: - Callable[[~.UploadModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'upload_model' not in self._stubs: - self._stubs['upload_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/UploadModel', - request_serializer=model_service.UploadModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['upload_model'] - - @property - def get_model(self) -> Callable[ - [model_service.GetModelRequest], - model.Model]: - r"""Return a callable for the get model method over gRPC. - - Gets a Model. - - Returns: - Callable[[~.GetModelRequest], - ~.Model]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_model' not in self._stubs: - self._stubs['get_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/GetModel', - request_serializer=model_service.GetModelRequest.serialize, - response_deserializer=model.Model.deserialize, - ) - return self._stubs['get_model'] - - @property - def list_models(self) -> Callable[ - [model_service.ListModelsRequest], - model_service.ListModelsResponse]: - r"""Return a callable for the list models method over gRPC. - - Lists Models in a Location. - - Returns: - Callable[[~.ListModelsRequest], - ~.ListModelsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_models' not in self._stubs: - self._stubs['list_models'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/ListModels', - request_serializer=model_service.ListModelsRequest.serialize, - response_deserializer=model_service.ListModelsResponse.deserialize, - ) - return self._stubs['list_models'] - - @property - def update_model(self) -> Callable[ - [model_service.UpdateModelRequest], - gca_model.Model]: - r"""Return a callable for the update model method over gRPC. - - Updates a Model. - - Returns: - Callable[[~.UpdateModelRequest], - ~.Model]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_model' not in self._stubs: - self._stubs['update_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/UpdateModel', - request_serializer=model_service.UpdateModelRequest.serialize, - response_deserializer=gca_model.Model.deserialize, - ) - return self._stubs['update_model'] - - @property - def delete_model(self) -> Callable[ - [model_service.DeleteModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete model method over gRPC. - - Deletes a Model. - - A model cannot be deleted if any - [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] resource - has a - [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] - based on the model in its - [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] - field. - - Returns: - Callable[[~.DeleteModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_model' not in self._stubs: - self._stubs['delete_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/DeleteModel', - request_serializer=model_service.DeleteModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_model'] - - @property - def export_model(self) -> Callable[ - [model_service.ExportModelRequest], - operations_pb2.Operation]: - r"""Return a callable for the export model method over gRPC. - - Exports a trained, exportable Model to a location specified by - the user. A Model is considered to be exportable if it has at - least one [supported export - format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats]. - - Returns: - Callable[[~.ExportModelRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'export_model' not in self._stubs: - self._stubs['export_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/ExportModel', - request_serializer=model_service.ExportModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_model'] - - @property - def get_model_evaluation(self) -> Callable[ - [model_service.GetModelEvaluationRequest], - model_evaluation.ModelEvaluation]: - r"""Return a callable for the get model evaluation method over gRPC. - - Gets a ModelEvaluation. 
- - Returns: - Callable[[~.GetModelEvaluationRequest], - ~.ModelEvaluation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_model_evaluation' not in self._stubs: - self._stubs['get_model_evaluation'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/GetModelEvaluation', - request_serializer=model_service.GetModelEvaluationRequest.serialize, - response_deserializer=model_evaluation.ModelEvaluation.deserialize, - ) - return self._stubs['get_model_evaluation'] - - @property - def list_model_evaluations(self) -> Callable[ - [model_service.ListModelEvaluationsRequest], - model_service.ListModelEvaluationsResponse]: - r"""Return a callable for the list model evaluations method over gRPC. - - Lists ModelEvaluations in a Model. - - Returns: - Callable[[~.ListModelEvaluationsRequest], - ~.ListModelEvaluationsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_model_evaluations' not in self._stubs: - self._stubs['list_model_evaluations'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/ListModelEvaluations', - request_serializer=model_service.ListModelEvaluationsRequest.serialize, - response_deserializer=model_service.ListModelEvaluationsResponse.deserialize, - ) - return self._stubs['list_model_evaluations'] - - @property - def get_model_evaluation_slice(self) -> Callable[ - [model_service.GetModelEvaluationSliceRequest], - model_evaluation_slice.ModelEvaluationSlice]: - r"""Return a callable for the get model evaluation slice method over gRPC. - - Gets a ModelEvaluationSlice. - - Returns: - Callable[[~.GetModelEvaluationSliceRequest], - ~.ModelEvaluationSlice]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_model_evaluation_slice' not in self._stubs: - self._stubs['get_model_evaluation_slice'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/GetModelEvaluationSlice', - request_serializer=model_service.GetModelEvaluationSliceRequest.serialize, - response_deserializer=model_evaluation_slice.ModelEvaluationSlice.deserialize, - ) - return self._stubs['get_model_evaluation_slice'] - - @property - def list_model_evaluation_slices(self) -> Callable[ - [model_service.ListModelEvaluationSlicesRequest], - model_service.ListModelEvaluationSlicesResponse]: - r"""Return a callable for the list model evaluation slices method over gRPC. - - Lists ModelEvaluationSlices in a ModelEvaluation. - - Returns: - Callable[[~.ListModelEvaluationSlicesRequest], - ~.ListModelEvaluationSlicesResponse]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_model_evaluation_slices' not in self._stubs: - self._stubs['list_model_evaluation_slices'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/ListModelEvaluationSlices', - request_serializer=model_service.ListModelEvaluationSlicesRequest.serialize, - response_deserializer=model_service.ListModelEvaluationSlicesResponse.deserialize, - ) - return self._stubs['list_model_evaluation_slices'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'ModelServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py deleted file mode 100644 index 8281a4249a..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/model_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,526 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import model -from google.cloud.aiplatform_v1beta1.types import model as gca_model -from google.cloud.aiplatform_v1beta1.types import model_evaluation -from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice -from google.cloud.aiplatform_v1beta1.types import model_service -from google.longrunning import operations_pb2 # type: ignore -from .base import ModelServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import ModelServiceGrpcTransport - - -class ModelServiceGrpcAsyncIOTransport(ModelServiceTransport): - """gRPC AsyncIO backend transport for ModelService. - - A service for managing Vertex AI's machine learning Models. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. 
- credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. 
- always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. 
- if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def upload_model(self) -> Callable[ - [model_service.UploadModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the upload model method over gRPC. - - Uploads a Model artifact into Vertex AI. - - Returns: - Callable[[~.UploadModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'upload_model' not in self._stubs: - self._stubs['upload_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/UploadModel', - request_serializer=model_service.UploadModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['upload_model'] - - @property - def get_model(self) -> Callable[ - [model_service.GetModelRequest], - Awaitable[model.Model]]: - r"""Return a callable for the get model method over gRPC. - - Gets a Model. - - Returns: - Callable[[~.GetModelRequest], - Awaitable[~.Model]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_model' not in self._stubs: - self._stubs['get_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/GetModel', - request_serializer=model_service.GetModelRequest.serialize, - response_deserializer=model.Model.deserialize, - ) - return self._stubs['get_model'] - - @property - def list_models(self) -> Callable[ - [model_service.ListModelsRequest], - Awaitable[model_service.ListModelsResponse]]: - r"""Return a callable for the list models method over gRPC. - - Lists Models in a Location. - - Returns: - Callable[[~.ListModelsRequest], - Awaitable[~.ListModelsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_models' not in self._stubs: - self._stubs['list_models'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/ListModels', - request_serializer=model_service.ListModelsRequest.serialize, - response_deserializer=model_service.ListModelsResponse.deserialize, - ) - return self._stubs['list_models'] - - @property - def update_model(self) -> Callable[ - [model_service.UpdateModelRequest], - Awaitable[gca_model.Model]]: - r"""Return a callable for the update model method over gRPC. - - Updates a Model. - - Returns: - Callable[[~.UpdateModelRequest], - Awaitable[~.Model]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_model' not in self._stubs: - self._stubs['update_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/UpdateModel', - request_serializer=model_service.UpdateModelRequest.serialize, - response_deserializer=gca_model.Model.deserialize, - ) - return self._stubs['update_model'] - - @property - def delete_model(self) -> Callable[ - [model_service.DeleteModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete model method over gRPC. - - Deletes a Model. - - A model cannot be deleted if any - [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] resource - has a - [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] - based on the model in its - [deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] - field. - - Returns: - Callable[[~.DeleteModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_model' not in self._stubs: - self._stubs['delete_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/DeleteModel', - request_serializer=model_service.DeleteModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_model'] - - @property - def export_model(self) -> Callable[ - [model_service.ExportModelRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the export model method over gRPC. - - Exports a trained, exportable Model to a location specified by - the user. A Model is considered to be exportable if it has at - least one [supported export - format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats]. 
- - Returns: - Callable[[~.ExportModelRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'export_model' not in self._stubs: - self._stubs['export_model'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/ExportModel', - request_serializer=model_service.ExportModelRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['export_model'] - - @property - def get_model_evaluation(self) -> Callable[ - [model_service.GetModelEvaluationRequest], - Awaitable[model_evaluation.ModelEvaluation]]: - r"""Return a callable for the get model evaluation method over gRPC. - - Gets a ModelEvaluation. - - Returns: - Callable[[~.GetModelEvaluationRequest], - Awaitable[~.ModelEvaluation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_model_evaluation' not in self._stubs: - self._stubs['get_model_evaluation'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/GetModelEvaluation', - request_serializer=model_service.GetModelEvaluationRequest.serialize, - response_deserializer=model_evaluation.ModelEvaluation.deserialize, - ) - return self._stubs['get_model_evaluation'] - - @property - def list_model_evaluations(self) -> Callable[ - [model_service.ListModelEvaluationsRequest], - Awaitable[model_service.ListModelEvaluationsResponse]]: - r"""Return a callable for the list model evaluations method over gRPC. - - Lists ModelEvaluations in a Model. 
- - Returns: - Callable[[~.ListModelEvaluationsRequest], - Awaitable[~.ListModelEvaluationsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_model_evaluations' not in self._stubs: - self._stubs['list_model_evaluations'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/ListModelEvaluations', - request_serializer=model_service.ListModelEvaluationsRequest.serialize, - response_deserializer=model_service.ListModelEvaluationsResponse.deserialize, - ) - return self._stubs['list_model_evaluations'] - - @property - def get_model_evaluation_slice(self) -> Callable[ - [model_service.GetModelEvaluationSliceRequest], - Awaitable[model_evaluation_slice.ModelEvaluationSlice]]: - r"""Return a callable for the get model evaluation slice method over gRPC. - - Gets a ModelEvaluationSlice. - - Returns: - Callable[[~.GetModelEvaluationSliceRequest], - Awaitable[~.ModelEvaluationSlice]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_model_evaluation_slice' not in self._stubs: - self._stubs['get_model_evaluation_slice'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/GetModelEvaluationSlice', - request_serializer=model_service.GetModelEvaluationSliceRequest.serialize, - response_deserializer=model_evaluation_slice.ModelEvaluationSlice.deserialize, - ) - return self._stubs['get_model_evaluation_slice'] - - @property - def list_model_evaluation_slices(self) -> Callable[ - [model_service.ListModelEvaluationSlicesRequest], - Awaitable[model_service.ListModelEvaluationSlicesResponse]]: - r"""Return a callable for the list model evaluation slices method over gRPC. - - Lists ModelEvaluationSlices in a ModelEvaluation. - - Returns: - Callable[[~.ListModelEvaluationSlicesRequest], - Awaitable[~.ListModelEvaluationSlicesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_model_evaluation_slices' not in self._stubs: - self._stubs['list_model_evaluation_slices'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.ModelService/ListModelEvaluationSlices', - request_serializer=model_service.ListModelEvaluationSlicesRequest.serialize, - response_deserializer=model_service.ListModelEvaluationSlicesResponse.deserialize, - ) - return self._stubs['list_model_evaluation_slices'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'ModelServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/__init__.py deleted file mode 100644 index 539616023d..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import PipelineServiceClient -from .async_client import PipelineServiceAsyncClient - -__all__ = ( - 'PipelineServiceClient', - 'PipelineServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py deleted file mode 100644 index 8edae5b9bd..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/async_client.py +++ /dev/null @@ -1,1076 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.pipeline_service import pagers -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import model -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.cloud.aiplatform_v1beta1.types import pipeline_job -from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job -from google.cloud.aiplatform_v1beta1.types import pipeline_service -from google.cloud.aiplatform_v1beta1.types import pipeline_state -from google.cloud.aiplatform_v1beta1.types import training_pipeline -from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from .transports.base import PipelineServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import PipelineServiceGrpcAsyncIOTransport -from .client import PipelineServiceClient - - -class PipelineServiceAsyncClient: - """A service for creating and 
managing Vertex AI's pipelines. This - includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex AI - Pipelines). - """ - - _client: PipelineServiceClient - - DEFAULT_ENDPOINT = PipelineServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = PipelineServiceClient.DEFAULT_MTLS_ENDPOINT - - artifact_path = staticmethod(PipelineServiceClient.artifact_path) - parse_artifact_path = staticmethod(PipelineServiceClient.parse_artifact_path) - context_path = staticmethod(PipelineServiceClient.context_path) - parse_context_path = staticmethod(PipelineServiceClient.parse_context_path) - custom_job_path = staticmethod(PipelineServiceClient.custom_job_path) - parse_custom_job_path = staticmethod(PipelineServiceClient.parse_custom_job_path) - endpoint_path = staticmethod(PipelineServiceClient.endpoint_path) - parse_endpoint_path = staticmethod(PipelineServiceClient.parse_endpoint_path) - execution_path = staticmethod(PipelineServiceClient.execution_path) - parse_execution_path = staticmethod(PipelineServiceClient.parse_execution_path) - model_path = staticmethod(PipelineServiceClient.model_path) - parse_model_path = staticmethod(PipelineServiceClient.parse_model_path) - network_path = staticmethod(PipelineServiceClient.network_path) - parse_network_path = staticmethod(PipelineServiceClient.parse_network_path) - pipeline_job_path = staticmethod(PipelineServiceClient.pipeline_job_path) - parse_pipeline_job_path = staticmethod(PipelineServiceClient.parse_pipeline_job_path) - training_pipeline_path = staticmethod(PipelineServiceClient.training_pipeline_path) - parse_training_pipeline_path = staticmethod(PipelineServiceClient.parse_training_pipeline_path) - common_billing_account_path = staticmethod(PipelineServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(PipelineServiceClient.parse_common_billing_account_path) - common_folder_path = 
staticmethod(PipelineServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(PipelineServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(PipelineServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(PipelineServiceClient.parse_common_organization_path) - common_project_path = staticmethod(PipelineServiceClient.common_project_path) - parse_common_project_path = staticmethod(PipelineServiceClient.parse_common_project_path) - common_location_path = staticmethod(PipelineServiceClient.common_location_path) - parse_common_location_path = staticmethod(PipelineServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PipelineServiceAsyncClient: The constructed client. - """ - return PipelineServiceClient.from_service_account_info.__func__(PipelineServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PipelineServiceAsyncClient: The constructed client. - """ - return PipelineServiceClient.from_service_account_file.__func__(PipelineServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> PipelineServiceTransport: - """Returns the transport used by the client instance. 
- - Returns: - PipelineServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(PipelineServiceClient).get_transport_class, type(PipelineServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, PipelineServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the pipeline service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.PipelineServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. 
- - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = PipelineServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_training_pipeline(self, - request: Union[pipeline_service.CreateTrainingPipelineRequest, dict] = None, - *, - parent: str = None, - training_pipeline: gca_training_pipeline.TrainingPipeline = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_training_pipeline.TrainingPipeline: - r"""Creates a TrainingPipeline. A created - TrainingPipeline right away will be attempted to be run. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateTrainingPipelineRequest, dict]): - The request object. Request message for - [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline]. - parent (:class:`str`): - Required. The resource name of the Location to create - the TrainingPipeline in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - training_pipeline (:class:`google.cloud.aiplatform_v1beta1.types.TrainingPipeline`): - Required. The TrainingPipeline to - create. - - This corresponds to the ``training_pipeline`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TrainingPipeline: - The TrainingPipeline orchestrates tasks associated with training a Model. 
It - always executes the training task, and optionally may - also export data from Vertex AI's Dataset which - becomes the training input, - [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] - the Model to Vertex AI, and evaluate the Model. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, training_pipeline]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.CreateTrainingPipelineRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if training_pipeline is not None: - request.training_pipeline = training_pipeline - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_training_pipeline, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_training_pipeline(self, - request: Union[pipeline_service.GetTrainingPipelineRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> training_pipeline.TrainingPipeline: - r"""Gets a TrainingPipeline. 
- - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetTrainingPipelineRequest, dict]): - The request object. Request message for - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline]. - name (:class:`str`): - Required. The name of the TrainingPipeline resource. - Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TrainingPipeline: - The TrainingPipeline orchestrates tasks associated with training a Model. It - always executes the training task, and optionally may - also export data from Vertex AI's Dataset which - becomes the training input, - [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] - the Model to Vertex AI, and evaluate the Model. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.GetTrainingPipelineRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_training_pipeline, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_training_pipelines(self, - request: Union[pipeline_service.ListTrainingPipelinesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTrainingPipelinesAsyncPager: - r"""Lists TrainingPipelines in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest, dict]): - The request object. Request message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines]. - parent (:class:`str`): - Required. The resource name of the Location to list the - TrainingPipelines from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListTrainingPipelinesAsyncPager: - Response message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.ListTrainingPipelinesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_training_pipelines, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListTrainingPipelinesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def delete_training_pipeline(self, - request: Union[pipeline_service.DeleteTrainingPipelineRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a TrainingPipeline. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTrainingPipelineRequest, dict]): - The request object. Request message for - [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline]. - name (:class:`str`): - Required. The name of the TrainingPipeline resource to - be deleted. Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.DeleteTrainingPipelineRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_training_pipeline, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def cancel_training_pipeline(self, - request: Union[pipeline_service.CancelTrainingPipelineRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a TrainingPipeline. Starts asynchronous cancellation on - the TrainingPipeline. The server makes a best effort to cancel - the pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. 
On - successful cancellation, the TrainingPipeline is not deleted; - instead it becomes a pipeline with a - [TrainingPipeline.error][google.cloud.aiplatform.v1beta1.TrainingPipeline.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state] - is set to ``CANCELLED``. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CancelTrainingPipelineRequest, dict]): - The request object. Request message for - [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline]. - name (:class:`str`): - Required. The name of the TrainingPipeline to cancel. - Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.CancelTrainingPipelineRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.cancel_training_pipeline, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def create_pipeline_job(self, - request: Union[pipeline_service.CreatePipelineJobRequest, dict] = None, - *, - parent: str = None, - pipeline_job: gca_pipeline_job.PipelineJob = None, - pipeline_job_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_pipeline_job.PipelineJob: - r"""Creates a PipelineJob. A PipelineJob will run - immediately when created. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreatePipelineJobRequest, dict]): - The request object. Request message for - [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CreatePipelineJob]. - parent (:class:`str`): - Required. The resource name of the Location to create - the PipelineJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - pipeline_job (:class:`google.cloud.aiplatform_v1beta1.types.PipelineJob`): - Required. The PipelineJob to create. - This corresponds to the ``pipeline_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - pipeline_job_id (:class:`str`): - The ID to use for the PipelineJob, which will become the - final component of the PipelineJob name. If not - provided, an ID will be automatically generated. - - This value should be less than 128 characters, and valid - characters are /[a-z][0-9]-/. 
- - This corresponds to the ``pipeline_job_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.PipelineJob: - An instance of a machine learning - PipelineJob. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, pipeline_job, pipeline_job_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.CreatePipelineJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if pipeline_job is not None: - request.pipeline_job = pipeline_job - if pipeline_job_id is not None: - request.pipeline_job_id = pipeline_job_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_pipeline_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def get_pipeline_job(self, - request: Union[pipeline_service.GetPipelineJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pipeline_job.PipelineJob: - r"""Gets a PipelineJob. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetPipelineJobRequest, dict]): - The request object. Request message for - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob]. - name (:class:`str`): - Required. The name of the PipelineJob resource. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.PipelineJob: - An instance of a machine learning - PipelineJob. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.GetPipelineJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_pipeline_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_pipeline_jobs(self, - request: Union[pipeline_service.ListPipelineJobsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListPipelineJobsAsyncPager: - r"""Lists PipelineJobs in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest, dict]): - The request object. Request message for - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs]. - parent (:class:`str`): - Required. The resource name of the Location to list the - PipelineJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListPipelineJobsAsyncPager: - Response message for - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. 
- - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.ListPipelineJobsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_pipeline_jobs, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListPipelineJobsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_pipeline_job(self, - request: Union[pipeline_service.DeletePipelineJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a PipelineJob. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeletePipelineJobRequest, dict]): - The request object. 
Request message for - [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.DeletePipelineJob]. - name (:class:`str`): - Required. The name of the PipelineJob resource to be - deleted. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.DeletePipelineJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_pipeline_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def cancel_pipeline_job(self, - request: Union[pipeline_service.CancelPipelineJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a PipelineJob. Starts asynchronous cancellation on the - PipelineJob. The server makes a best effort to cancel the - pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. On - successful cancellation, the PipelineJob is not deleted; instead - it becomes a pipeline with a - [PipelineJob.error][google.cloud.aiplatform.v1beta1.PipelineJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [PipelineJob.state][google.cloud.aiplatform.v1beta1.PipelineJob.state] - is set to ``CANCELLED``. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CancelPipelineJobRequest, dict]): - The request object. 
Request message for - [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CancelPipelineJob]. - name (:class:`str`): - Required. The name of the PipelineJob to cancel. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = pipeline_service.CancelPipelineJobRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.cancel_pipeline_job, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "PipelineServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py deleted file mode 100644 index 616f5d4a9d..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/client.py +++ /dev/null @@ -1,1346 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.pipeline_service import pagers -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import model -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.cloud.aiplatform_v1beta1.types import pipeline_job -from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job -from google.cloud.aiplatform_v1beta1.types import pipeline_service -from google.cloud.aiplatform_v1beta1.types import pipeline_state -from google.cloud.aiplatform_v1beta1.types import training_pipeline -from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from .transports.base import 
PipelineServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import PipelineServiceGrpcTransport -from .transports.grpc_asyncio import PipelineServiceGrpcAsyncIOTransport - - -class PipelineServiceClientMeta(type): - """Metaclass for the PipelineService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[PipelineServiceTransport]] - _transport_registry["grpc"] = PipelineServiceGrpcTransport - _transport_registry["grpc_asyncio"] = PipelineServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[PipelineServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class PipelineServiceClient(metaclass=PipelineServiceClientMeta): - """A service for creating and managing Vertex AI's pipelines. This - includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex AI - Pipelines). - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. 
- """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PipelineServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PipelineServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> PipelineServiceTransport: - """Returns the transport used by the client instance. 
- - Returns: - PipelineServiceTransport: The transport used by the client - instance. - """ - return self._transport - - @staticmethod - def artifact_path(project: str,location: str,metadata_store: str,artifact: str,) -> str: - """Returns a fully-qualified artifact string.""" - return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, ) - - @staticmethod - def parse_artifact_path(path: str) -> Dict[str,str]: - """Parses a artifact path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/artifacts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def context_path(project: str,location: str,metadata_store: str,context: str,) -> str: - """Returns a fully-qualified context string.""" - return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, ) - - @staticmethod - def parse_context_path(path: str) -> Dict[str,str]: - """Parses a context path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/contexts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def custom_job_path(project: str,location: str,custom_job: str,) -> str: - """Returns a fully-qualified custom_job string.""" - return "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) - - @staticmethod - def parse_custom_job_path(path: str) -> Dict[str,str]: - """Parses a custom_job path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/customJobs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def endpoint_path(project: str,location: str,endpoint: str,) -> 
str: - """Returns a fully-qualified endpoint string.""" - return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - - @staticmethod - def parse_endpoint_path(path: str) -> Dict[str,str]: - """Parses a endpoint path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def execution_path(project: str,location: str,metadata_store: str,execution: str,) -> str: - """Returns a fully-qualified execution string.""" - return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, location=location, metadata_store=metadata_store, execution=execution, ) - - @staticmethod - def parse_execution_path(path: str) -> Dict[str,str]: - """Parses a execution path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/metadataStores/(?P.+?)/executions/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_path(project: str,location: str,model: str,) -> str: - """Returns a fully-qualified model string.""" - return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - - @staticmethod - def parse_model_path(path: str) -> Dict[str,str]: - """Parses a model path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def network_path(project: str,network: str,) -> str: - """Returns a fully-qualified network string.""" - return "projects/{project}/global/networks/{network}".format(project=project, network=network, ) - - @staticmethod - def parse_network_path(path: str) -> Dict[str,str]: - """Parses a network path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/global/networks/(?P.+?)$", path) - 
return m.groupdict() if m else {} - - @staticmethod - def pipeline_job_path(project: str,location: str,pipeline_job: str,) -> str: - """Returns a fully-qualified pipeline_job string.""" - return "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format(project=project, location=location, pipeline_job=pipeline_job, ) - - @staticmethod - def parse_pipeline_job_path(path: str) -> Dict[str,str]: - """Parses a pipeline_job path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/pipelineJobs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def training_pipeline_path(project: str,location: str,training_pipeline: str,) -> str: - """Returns a fully-qualified training_pipeline string.""" - return "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, ) - - @staticmethod - def parse_training_pipeline_path(path: str) -> Dict[str,str]: - """Parses a training_pipeline path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/trainingPipelines/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = 
re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, PipelineServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the pipeline service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, PipelineServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. 
- if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, PipelineServiceTransport): - # transport is a PipelineServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def create_training_pipeline(self, - request: Union[pipeline_service.CreateTrainingPipelineRequest, dict] = None, - *, - parent: str = None, - training_pipeline: gca_training_pipeline.TrainingPipeline = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_training_pipeline.TrainingPipeline: - r"""Creates a TrainingPipeline. A created - TrainingPipeline right away will be attempted to be run. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateTrainingPipelineRequest, dict]): - The request object. Request message for - [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline]. - parent (str): - Required. The resource name of the Location to create - the TrainingPipeline in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - training_pipeline (google.cloud.aiplatform_v1beta1.types.TrainingPipeline): - Required. The TrainingPipeline to - create. - - This corresponds to the ``training_pipeline`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.types.TrainingPipeline: - The TrainingPipeline orchestrates tasks associated with training a Model. It - always executes the training task, and optionally may - also export data from Vertex AI's Dataset which - becomes the training input, - [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] - the Model to Vertex AI, and evaluate the Model. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, training_pipeline]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.CreateTrainingPipelineRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.CreateTrainingPipelineRequest): - request = pipeline_service.CreateTrainingPipelineRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if training_pipeline is not None: - request.training_pipeline = training_pipeline - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_training_pipeline] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def get_training_pipeline(self, - request: Union[pipeline_service.GetTrainingPipelineRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> training_pipeline.TrainingPipeline: - r"""Gets a TrainingPipeline. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetTrainingPipelineRequest, dict]): - The request object. Request message for - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline]. - name (str): - Required. The name of the TrainingPipeline resource. - Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TrainingPipeline: - The TrainingPipeline orchestrates tasks associated with training a Model. It - always executes the training task, and optionally may - also export data from Vertex AI's Dataset which - becomes the training input, - [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] - the Model to Vertex AI, and evaluate the Model. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.GetTrainingPipelineRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.GetTrainingPipelineRequest): - request = pipeline_service.GetTrainingPipelineRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_training_pipeline] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_training_pipelines(self, - request: Union[pipeline_service.ListTrainingPipelinesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTrainingPipelinesPager: - r"""Lists TrainingPipelines in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest, dict]): - The request object. Request message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines]. - parent (str): - Required. The resource name of the Location to list the - TrainingPipelines from. 
Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListTrainingPipelinesPager: - Response message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.ListTrainingPipelinesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.ListTrainingPipelinesRequest): - request = pipeline_service.ListTrainingPipelinesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_training_pipelines] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListTrainingPipelinesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_training_pipeline(self, - request: Union[pipeline_service.DeleteTrainingPipelineRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a TrainingPipeline. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTrainingPipelineRequest, dict]): - The request object. Request message for - [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline]. - name (str): - Required. The name of the TrainingPipeline resource to - be deleted. Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. 
A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.DeleteTrainingPipelineRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.DeleteTrainingPipelineRequest): - request = pipeline_service.DeleteTrainingPipelineRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_training_pipeline] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
- return response - - def cancel_training_pipeline(self, - request: Union[pipeline_service.CancelTrainingPipelineRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a TrainingPipeline. Starts asynchronous cancellation on - the TrainingPipeline. The server makes a best effort to cancel - the pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. On - successful cancellation, the TrainingPipeline is not deleted; - instead it becomes a pipeline with a - [TrainingPipeline.error][google.cloud.aiplatform.v1beta1.TrainingPipeline.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state] - is set to ``CANCELLED``. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CancelTrainingPipelineRequest, dict]): - The request object. Request message for - [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline]. - name (str): - Required. The name of the TrainingPipeline to cancel. - Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.CancelTrainingPipelineRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.CancelTrainingPipelineRequest): - request = pipeline_service.CancelTrainingPipelineRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.cancel_training_pipeline] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def create_pipeline_job(self, - request: Union[pipeline_service.CreatePipelineJobRequest, dict] = None, - *, - parent: str = None, - pipeline_job: gca_pipeline_job.PipelineJob = None, - pipeline_job_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_pipeline_job.PipelineJob: - r"""Creates a PipelineJob. A PipelineJob will run - immediately when created. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreatePipelineJobRequest, dict]): - The request object. 
Request message for - [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CreatePipelineJob]. - parent (str): - Required. The resource name of the Location to create - the PipelineJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - pipeline_job (google.cloud.aiplatform_v1beta1.types.PipelineJob): - Required. The PipelineJob to create. - This corresponds to the ``pipeline_job`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - pipeline_job_id (str): - The ID to use for the PipelineJob, which will become the - final component of the PipelineJob name. If not - provided, an ID will be automatically generated. - - This value should be less than 128 characters, and valid - characters are /[a-z][0-9]-/. - - This corresponds to the ``pipeline_job_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.PipelineJob: - An instance of a machine learning - PipelineJob. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, pipeline_job, pipeline_job_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.CreatePipelineJobRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.CreatePipelineJobRequest): - request = pipeline_service.CreatePipelineJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if pipeline_job is not None: - request.pipeline_job = pipeline_job - if pipeline_job_id is not None: - request.pipeline_job_id = pipeline_job_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_pipeline_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_pipeline_job(self, - request: Union[pipeline_service.GetPipelineJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pipeline_job.PipelineJob: - r"""Gets a PipelineJob. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetPipelineJobRequest, dict]): - The request object. Request message for - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob]. - name (str): - Required. The name of the PipelineJob resource. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.PipelineJob: - An instance of a machine learning - PipelineJob. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.GetPipelineJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.GetPipelineJobRequest): - request = pipeline_service.GetPipelineJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_pipeline_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def list_pipeline_jobs(self, - request: Union[pipeline_service.ListPipelineJobsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListPipelineJobsPager: - r"""Lists PipelineJobs in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest, dict]): - The request object. Request message for - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs]. - parent (str): - Required. The resource name of the Location to list the - PipelineJobs from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListPipelineJobsPager: - Response message for - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.ListPipelineJobsRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.ListPipelineJobsRequest): - request = pipeline_service.ListPipelineJobsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_pipeline_jobs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListPipelineJobsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_pipeline_job(self, - request: Union[pipeline_service.DeletePipelineJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a PipelineJob. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeletePipelineJobRequest, dict]): - The request object. Request message for - [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.DeletePipelineJob]. - name (str): - Required. The name of the PipelineJob resource to be - deleted. 
Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.DeletePipelineJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.DeletePipelineJobRequest): - request = pipeline_service.DeletePipelineJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.delete_pipeline_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def cancel_pipeline_job(self, - request: Union[pipeline_service.CancelPipelineJobRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels a PipelineJob. Starts asynchronous cancellation on the - PipelineJob. The server makes a best effort to cancel the - pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. On - successful cancellation, the PipelineJob is not deleted; instead - it becomes a pipeline with a - [PipelineJob.error][google.cloud.aiplatform.v1beta1.PipelineJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [PipelineJob.state][google.cloud.aiplatform.v1beta1.PipelineJob.state] - is set to ``CANCELLED``. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CancelPipelineJobRequest, dict]): - The request object. Request message for - [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CancelPipelineJob]. - name (str): - Required. 
The name of the PipelineJob to cancel. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a pipeline_service.CancelPipelineJobRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, pipeline_service.CancelPipelineJobRequest): - request = pipeline_service.CancelPipelineJobRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.cancel_pipeline_job] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. 
- - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! - """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "PipelineServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py deleted file mode 100644 index e591c75099..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/pagers.py +++ /dev/null @@ -1,264 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.aiplatform_v1beta1.types import pipeline_job -from google.cloud.aiplatform_v1beta1.types import pipeline_service -from google.cloud.aiplatform_v1beta1.types import training_pipeline - - -class ListTrainingPipelinesPager: - """A pager for iterating through ``list_training_pipelines`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``training_pipelines`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListTrainingPipelines`` requests and continue to iterate - through the ``training_pipelines`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., pipeline_service.ListTrainingPipelinesResponse], - request: pipeline_service.ListTrainingPipelinesRequest, - response: pipeline_service.ListTrainingPipelinesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = pipeline_service.ListTrainingPipelinesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[pipeline_service.ListTrainingPipelinesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[training_pipeline.TrainingPipeline]: - for page in self.pages: - yield from page.training_pipelines - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTrainingPipelinesAsyncPager: - """A pager for iterating through ``list_training_pipelines`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``training_pipelines`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListTrainingPipelines`` requests and continue to iterate - through the ``training_pipelines`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[pipeline_service.ListTrainingPipelinesResponse]], - request: pipeline_service.ListTrainingPipelinesRequest, - response: pipeline_service.ListTrainingPipelinesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. 
- - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = pipeline_service.ListTrainingPipelinesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[pipeline_service.ListTrainingPipelinesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[training_pipeline.TrainingPipeline]: - async def async_generator(): - async for page in self.pages: - for response in page.training_pipelines: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListPipelineJobsPager: - """A pager for iterating through ``list_pipeline_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``pipeline_jobs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListPipelineJobs`` requests and continue to iterate - through the ``pipeline_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse` - attributes are available on the pager. 
If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., pipeline_service.ListPipelineJobsResponse], - request: pipeline_service.ListPipelineJobsRequest, - response: pipeline_service.ListPipelineJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = pipeline_service.ListPipelineJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[pipeline_service.ListPipelineJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[pipeline_job.PipelineJob]: - for page in self.pages: - yield from page.pipeline_jobs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListPipelineJobsAsyncPager: - """A pager for iterating through ``list_pipeline_jobs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``pipeline_jobs`` field. 
- - If there are more pages, the ``__aiter__`` method will make additional - ``ListPipelineJobs`` requests and continue to iterate - through the ``pipeline_jobs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[pipeline_service.ListPipelineJobsResponse]], - request: pipeline_service.ListPipelineJobsRequest, - response: pipeline_service.ListPipelineJobsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListPipelineJobsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = pipeline_service.ListPipelineJobsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[pipeline_service.ListPipelineJobsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[pipeline_job.PipelineJob]: - async def async_generator(): - async for page in self.pages: - for response in page.pipeline_jobs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/__init__.py deleted file mode 100644 index 77051d8254..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -from typing import Dict, Type - -from .base import PipelineServiceTransport -from .grpc import PipelineServiceGrpcTransport -from .grpc_asyncio import PipelineServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[PipelineServiceTransport]] -_transport_registry['grpc'] = PipelineServiceGrpcTransport -_transport_registry['grpc_asyncio'] = PipelineServiceGrpcAsyncIOTransport - -__all__ = ( - 'PipelineServiceTransport', - 'PipelineServiceGrpcTransport', - 'PipelineServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py deleted file mode 100644 index 5304d8a300..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py +++ /dev/null @@ -1,284 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import pipeline_job -from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job -from google.cloud.aiplatform_v1beta1.types import pipeline_service -from google.cloud.aiplatform_v1beta1.types import training_pipeline -from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class PipelineServiceTransport(abc.ABC): - """Abstract transport class for PipelineService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. 
- credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. 
- if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.create_training_pipeline: gapic_v1.method.wrap_method( - self.create_training_pipeline, - default_timeout=5.0, - client_info=client_info, - ), - self.get_training_pipeline: gapic_v1.method.wrap_method( - self.get_training_pipeline, - default_timeout=5.0, - client_info=client_info, - ), - self.list_training_pipelines: gapic_v1.method.wrap_method( - self.list_training_pipelines, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_training_pipeline: gapic_v1.method.wrap_method( - self.delete_training_pipeline, - default_timeout=5.0, - client_info=client_info, - ), - self.cancel_training_pipeline: gapic_v1.method.wrap_method( - self.cancel_training_pipeline, - default_timeout=5.0, - client_info=client_info, - ), - self.create_pipeline_job: gapic_v1.method.wrap_method( - self.create_pipeline_job, - default_timeout=None, - client_info=client_info, - ), - self.get_pipeline_job: gapic_v1.method.wrap_method( - self.get_pipeline_job, - default_timeout=None, - client_info=client_info, - ), - self.list_pipeline_jobs: gapic_v1.method.wrap_method( - self.list_pipeline_jobs, - default_timeout=None, - client_info=client_info, - ), - self.delete_pipeline_job: gapic_v1.method.wrap_method( - self.delete_pipeline_job, - default_timeout=None, - client_info=client_info, - ), - self.cancel_pipeline_job: gapic_v1.method.wrap_method( - self.cancel_pipeline_job, - default_timeout=None, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. 
warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! - """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_training_pipeline(self) -> Callable[ - [pipeline_service.CreateTrainingPipelineRequest], - Union[ - gca_training_pipeline.TrainingPipeline, - Awaitable[gca_training_pipeline.TrainingPipeline] - ]]: - raise NotImplementedError() - - @property - def get_training_pipeline(self) -> Callable[ - [pipeline_service.GetTrainingPipelineRequest], - Union[ - training_pipeline.TrainingPipeline, - Awaitable[training_pipeline.TrainingPipeline] - ]]: - raise NotImplementedError() - - @property - def list_training_pipelines(self) -> Callable[ - [pipeline_service.ListTrainingPipelinesRequest], - Union[ - pipeline_service.ListTrainingPipelinesResponse, - Awaitable[pipeline_service.ListTrainingPipelinesResponse] - ]]: - raise NotImplementedError() - - @property - def delete_training_pipeline(self) -> Callable[ - [pipeline_service.DeleteTrainingPipelineRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def cancel_training_pipeline(self) -> Callable[ - [pipeline_service.CancelTrainingPipelineRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def create_pipeline_job(self) -> Callable[ - [pipeline_service.CreatePipelineJobRequest], - Union[ - gca_pipeline_job.PipelineJob, - Awaitable[gca_pipeline_job.PipelineJob] - ]]: - raise NotImplementedError() - - @property - def get_pipeline_job(self) -> Callable[ - [pipeline_service.GetPipelineJobRequest], - Union[ - pipeline_job.PipelineJob, - Awaitable[pipeline_job.PipelineJob] - ]]: - raise NotImplementedError() - - @property - def list_pipeline_jobs(self) -> 
Callable[ - [pipeline_service.ListPipelineJobsRequest], - Union[ - pipeline_service.ListPipelineJobsResponse, - Awaitable[pipeline_service.ListPipelineJobsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_pipeline_job(self) -> Callable[ - [pipeline_service.DeletePipelineJobRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def cancel_pipeline_job(self) -> Callable[ - [pipeline_service.CancelPipelineJobRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'PipelineServiceTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py deleted file mode 100644 index 453a952bdf..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc.py +++ /dev/null @@ -1,541 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1beta1.types import pipeline_job -from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job -from google.cloud.aiplatform_v1beta1.types import pipeline_service -from google.cloud.aiplatform_v1beta1.types import training_pipeline -from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import PipelineServiceTransport, DEFAULT_CLIENT_INFO - - -class PipelineServiceGrpcTransport(PipelineServiceTransport): - """gRPC backend transport for PipelineService. - - A service for creating and managing Vertex AI's pipelines. This - includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex AI - Pipelines). - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. 
- ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_training_pipeline(self) -> Callable[ - [pipeline_service.CreateTrainingPipelineRequest], - gca_training_pipeline.TrainingPipeline]: - r"""Return a callable for the create training pipeline method over gRPC. - - Creates a TrainingPipeline. A created - TrainingPipeline right away will be attempted to be run. 
- - Returns: - Callable[[~.CreateTrainingPipelineRequest], - ~.TrainingPipeline]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_training_pipeline' not in self._stubs: - self._stubs['create_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/CreateTrainingPipeline', - request_serializer=pipeline_service.CreateTrainingPipelineRequest.serialize, - response_deserializer=gca_training_pipeline.TrainingPipeline.deserialize, - ) - return self._stubs['create_training_pipeline'] - - @property - def get_training_pipeline(self) -> Callable[ - [pipeline_service.GetTrainingPipelineRequest], - training_pipeline.TrainingPipeline]: - r"""Return a callable for the get training pipeline method over gRPC. - - Gets a TrainingPipeline. - - Returns: - Callable[[~.GetTrainingPipelineRequest], - ~.TrainingPipeline]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_training_pipeline' not in self._stubs: - self._stubs['get_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/GetTrainingPipeline', - request_serializer=pipeline_service.GetTrainingPipelineRequest.serialize, - response_deserializer=training_pipeline.TrainingPipeline.deserialize, - ) - return self._stubs['get_training_pipeline'] - - @property - def list_training_pipelines(self) -> Callable[ - [pipeline_service.ListTrainingPipelinesRequest], - pipeline_service.ListTrainingPipelinesResponse]: - r"""Return a callable for the list training pipelines method over gRPC. - - Lists TrainingPipelines in a Location. - - Returns: - Callable[[~.ListTrainingPipelinesRequest], - ~.ListTrainingPipelinesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_training_pipelines' not in self._stubs: - self._stubs['list_training_pipelines'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/ListTrainingPipelines', - request_serializer=pipeline_service.ListTrainingPipelinesRequest.serialize, - response_deserializer=pipeline_service.ListTrainingPipelinesResponse.deserialize, - ) - return self._stubs['list_training_pipelines'] - - @property - def delete_training_pipeline(self) -> Callable[ - [pipeline_service.DeleteTrainingPipelineRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete training pipeline method over gRPC. - - Deletes a TrainingPipeline. - - Returns: - Callable[[~.DeleteTrainingPipelineRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_training_pipeline' not in self._stubs: - self._stubs['delete_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/DeleteTrainingPipeline', - request_serializer=pipeline_service.DeleteTrainingPipelineRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_training_pipeline'] - - @property - def cancel_training_pipeline(self) -> Callable[ - [pipeline_service.CancelTrainingPipelineRequest], - empty_pb2.Empty]: - r"""Return a callable for the cancel training pipeline method over gRPC. - - Cancels a TrainingPipeline. Starts asynchronous cancellation on - the TrainingPipeline. The server makes a best effort to cancel - the pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. On - successful cancellation, the TrainingPipeline is not deleted; - instead it becomes a pipeline with a - [TrainingPipeline.error][google.cloud.aiplatform.v1beta1.TrainingPipeline.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state] - is set to ``CANCELLED``. - - Returns: - Callable[[~.CancelTrainingPipelineRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'cancel_training_pipeline' not in self._stubs: - self._stubs['cancel_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/CancelTrainingPipeline', - request_serializer=pipeline_service.CancelTrainingPipelineRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_training_pipeline'] - - @property - def create_pipeline_job(self) -> Callable[ - [pipeline_service.CreatePipelineJobRequest], - gca_pipeline_job.PipelineJob]: - r"""Return a callable for the create pipeline job method over gRPC. - - Creates a PipelineJob. A PipelineJob will run - immediately when created. - - Returns: - Callable[[~.CreatePipelineJobRequest], - ~.PipelineJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_pipeline_job' not in self._stubs: - self._stubs['create_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/CreatePipelineJob', - request_serializer=pipeline_service.CreatePipelineJobRequest.serialize, - response_deserializer=gca_pipeline_job.PipelineJob.deserialize, - ) - return self._stubs['create_pipeline_job'] - - @property - def get_pipeline_job(self) -> Callable[ - [pipeline_service.GetPipelineJobRequest], - pipeline_job.PipelineJob]: - r"""Return a callable for the get pipeline job method over gRPC. - - Gets a PipelineJob. - - Returns: - Callable[[~.GetPipelineJobRequest], - ~.PipelineJob]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_pipeline_job' not in self._stubs: - self._stubs['get_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/GetPipelineJob', - request_serializer=pipeline_service.GetPipelineJobRequest.serialize, - response_deserializer=pipeline_job.PipelineJob.deserialize, - ) - return self._stubs['get_pipeline_job'] - - @property - def list_pipeline_jobs(self) -> Callable[ - [pipeline_service.ListPipelineJobsRequest], - pipeline_service.ListPipelineJobsResponse]: - r"""Return a callable for the list pipeline jobs method over gRPC. - - Lists PipelineJobs in a Location. - - Returns: - Callable[[~.ListPipelineJobsRequest], - ~.ListPipelineJobsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_pipeline_jobs' not in self._stubs: - self._stubs['list_pipeline_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/ListPipelineJobs', - request_serializer=pipeline_service.ListPipelineJobsRequest.serialize, - response_deserializer=pipeline_service.ListPipelineJobsResponse.deserialize, - ) - return self._stubs['list_pipeline_jobs'] - - @property - def delete_pipeline_job(self) -> Callable[ - [pipeline_service.DeletePipelineJobRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete pipeline job method over gRPC. - - Deletes a PipelineJob. - - Returns: - Callable[[~.DeletePipelineJobRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_pipeline_job' not in self._stubs: - self._stubs['delete_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/DeletePipelineJob', - request_serializer=pipeline_service.DeletePipelineJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_pipeline_job'] - - @property - def cancel_pipeline_job(self) -> Callable[ - [pipeline_service.CancelPipelineJobRequest], - empty_pb2.Empty]: - r"""Return a callable for the cancel pipeline job method over gRPC. - - Cancels a PipelineJob. Starts asynchronous cancellation on the - PipelineJob. The server makes a best effort to cancel the - pipeline, but success is not guaranteed. Clients can use - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. On - successful cancellation, the PipelineJob is not deleted; instead - it becomes a pipeline with a - [PipelineJob.error][google.cloud.aiplatform.v1beta1.PipelineJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [PipelineJob.state][google.cloud.aiplatform.v1beta1.PipelineJob.state] - is set to ``CANCELLED``. - - Returns: - Callable[[~.CancelPipelineJobRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'cancel_pipeline_job' not in self._stubs: - self._stubs['cancel_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/CancelPipelineJob', - request_serializer=pipeline_service.CancelPipelineJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_pipeline_job'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'PipelineServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py deleted file mode 100644 index 97a979bae0..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,545 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import pipeline_job -from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job -from google.cloud.aiplatform_v1beta1.types import pipeline_service -from google.cloud.aiplatform_v1beta1.types import training_pipeline -from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import PipelineServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import PipelineServiceGrpcTransport - - -class PipelineServiceGrpcAsyncIOTransport(PipelineServiceTransport): - """gRPC AsyncIO backend transport for PipelineService. - - A service for creating and managing Vertex AI's pipelines. This - includes both ``TrainingPipeline`` resources (used for AutoML and - custom training) and ``PipelineJob`` resources (used for Vertex AI - Pipelines). - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. 
- """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
- If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. 
This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_training_pipeline(self) -> Callable[ - [pipeline_service.CreateTrainingPipelineRequest], - Awaitable[gca_training_pipeline.TrainingPipeline]]: - r"""Return a callable for the create training pipeline method over gRPC. - - Creates a TrainingPipeline. A created - TrainingPipeline right away will be attempted to be run. - - Returns: - Callable[[~.CreateTrainingPipelineRequest], - Awaitable[~.TrainingPipeline]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_training_pipeline' not in self._stubs: - self._stubs['create_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/CreateTrainingPipeline', - request_serializer=pipeline_service.CreateTrainingPipelineRequest.serialize, - response_deserializer=gca_training_pipeline.TrainingPipeline.deserialize, - ) - return self._stubs['create_training_pipeline'] - - @property - def get_training_pipeline(self) -> Callable[ - [pipeline_service.GetTrainingPipelineRequest], - Awaitable[training_pipeline.TrainingPipeline]]: - r"""Return a callable for the get training pipeline method over gRPC. - - Gets a TrainingPipeline. - - Returns: - Callable[[~.GetTrainingPipelineRequest], - Awaitable[~.TrainingPipeline]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_training_pipeline' not in self._stubs: - self._stubs['get_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/GetTrainingPipeline', - request_serializer=pipeline_service.GetTrainingPipelineRequest.serialize, - response_deserializer=training_pipeline.TrainingPipeline.deserialize, - ) - return self._stubs['get_training_pipeline'] - - @property - def list_training_pipelines(self) -> Callable[ - [pipeline_service.ListTrainingPipelinesRequest], - Awaitable[pipeline_service.ListTrainingPipelinesResponse]]: - r"""Return a callable for the list training pipelines method over gRPC. - - Lists TrainingPipelines in a Location. - - Returns: - Callable[[~.ListTrainingPipelinesRequest], - Awaitable[~.ListTrainingPipelinesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_training_pipelines' not in self._stubs: - self._stubs['list_training_pipelines'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/ListTrainingPipelines', - request_serializer=pipeline_service.ListTrainingPipelinesRequest.serialize, - response_deserializer=pipeline_service.ListTrainingPipelinesResponse.deserialize, - ) - return self._stubs['list_training_pipelines'] - - @property - def delete_training_pipeline(self) -> Callable[ - [pipeline_service.DeleteTrainingPipelineRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete training pipeline method over gRPC. - - Deletes a TrainingPipeline. - - Returns: - Callable[[~.DeleteTrainingPipelineRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_training_pipeline' not in self._stubs: - self._stubs['delete_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/DeleteTrainingPipeline', - request_serializer=pipeline_service.DeleteTrainingPipelineRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_training_pipeline'] - - @property - def cancel_training_pipeline(self) -> Callable[ - [pipeline_service.CancelTrainingPipelineRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the cancel training pipeline method over gRPC. - - Cancels a TrainingPipeline. Starts asynchronous cancellation on - the TrainingPipeline. The server makes a best effort to cancel - the pipeline, but success is not guaranteed. 
Clients can use - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. On - successful cancellation, the TrainingPipeline is not deleted; - instead it becomes a pipeline with a - [TrainingPipeline.error][google.cloud.aiplatform.v1beta1.TrainingPipeline.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state] - is set to ``CANCELLED``. - - Returns: - Callable[[~.CancelTrainingPipelineRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_training_pipeline' not in self._stubs: - self._stubs['cancel_training_pipeline'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/CancelTrainingPipeline', - request_serializer=pipeline_service.CancelTrainingPipelineRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_training_pipeline'] - - @property - def create_pipeline_job(self) -> Callable[ - [pipeline_service.CreatePipelineJobRequest], - Awaitable[gca_pipeline_job.PipelineJob]]: - r"""Return a callable for the create pipeline job method over gRPC. - - Creates a PipelineJob. A PipelineJob will run - immediately when created. - - Returns: - Callable[[~.CreatePipelineJobRequest], - Awaitable[~.PipelineJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_pipeline_job' not in self._stubs: - self._stubs['create_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/CreatePipelineJob', - request_serializer=pipeline_service.CreatePipelineJobRequest.serialize, - response_deserializer=gca_pipeline_job.PipelineJob.deserialize, - ) - return self._stubs['create_pipeline_job'] - - @property - def get_pipeline_job(self) -> Callable[ - [pipeline_service.GetPipelineJobRequest], - Awaitable[pipeline_job.PipelineJob]]: - r"""Return a callable for the get pipeline job method over gRPC. - - Gets a PipelineJob. - - Returns: - Callable[[~.GetPipelineJobRequest], - Awaitable[~.PipelineJob]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_pipeline_job' not in self._stubs: - self._stubs['get_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/GetPipelineJob', - request_serializer=pipeline_service.GetPipelineJobRequest.serialize, - response_deserializer=pipeline_job.PipelineJob.deserialize, - ) - return self._stubs['get_pipeline_job'] - - @property - def list_pipeline_jobs(self) -> Callable[ - [pipeline_service.ListPipelineJobsRequest], - Awaitable[pipeline_service.ListPipelineJobsResponse]]: - r"""Return a callable for the list pipeline jobs method over gRPC. - - Lists PipelineJobs in a Location. - - Returns: - Callable[[~.ListPipelineJobsRequest], - Awaitable[~.ListPipelineJobsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_pipeline_jobs' not in self._stubs: - self._stubs['list_pipeline_jobs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/ListPipelineJobs', - request_serializer=pipeline_service.ListPipelineJobsRequest.serialize, - response_deserializer=pipeline_service.ListPipelineJobsResponse.deserialize, - ) - return self._stubs['list_pipeline_jobs'] - - @property - def delete_pipeline_job(self) -> Callable[ - [pipeline_service.DeletePipelineJobRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete pipeline job method over gRPC. - - Deletes a PipelineJob. - - Returns: - Callable[[~.DeletePipelineJobRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_pipeline_job' not in self._stubs: - self._stubs['delete_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/DeletePipelineJob', - request_serializer=pipeline_service.DeletePipelineJobRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_pipeline_job'] - - @property - def cancel_pipeline_job(self) -> Callable[ - [pipeline_service.CancelPipelineJobRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the cancel pipeline job method over gRPC. - - Cancels a PipelineJob. Starts asynchronous cancellation on the - PipelineJob. The server makes a best effort to cancel the - pipeline, but success is not guaranteed. 
Clients can use - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob] - or other methods to check whether the cancellation succeeded or - whether the pipeline completed despite cancellation. On - successful cancellation, the PipelineJob is not deleted; instead - it becomes a pipeline with a - [PipelineJob.error][google.cloud.aiplatform.v1beta1.PipelineJob.error] - value with a [google.rpc.Status.code][google.rpc.Status.code] of - 1, corresponding to ``Code.CANCELLED``, and - [PipelineJob.state][google.cloud.aiplatform.v1beta1.PipelineJob.state] - is set to ``CANCELLED``. - - Returns: - Callable[[~.CancelPipelineJobRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_pipeline_job' not in self._stubs: - self._stubs['cancel_pipeline_job'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PipelineService/CancelPipelineJob', - request_serializer=pipeline_service.CancelPipelineJobRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_pipeline_job'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'PipelineServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/__init__.py deleted file mode 100644 index 13c5d11c66..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this 
file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import PredictionServiceClient -from .async_client import PredictionServiceAsyncClient - -__all__ = ( - 'PredictionServiceClient', - 'PredictionServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py deleted file mode 100644 index 6fe3fa1a68..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py +++ /dev/null @@ -1,574 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api import httpbody_pb2 # type: ignore -from google.cloud.aiplatform_v1beta1.types import explanation -from google.cloud.aiplatform_v1beta1.types import prediction_service -from google.protobuf import any_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport -from .client import PredictionServiceClient - - -class PredictionServiceAsyncClient: - """A service for online predictions and explanations.""" - - _client: PredictionServiceClient - - DEFAULT_ENDPOINT = PredictionServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = PredictionServiceClient.DEFAULT_MTLS_ENDPOINT - - endpoint_path = staticmethod(PredictionServiceClient.endpoint_path) - parse_endpoint_path = staticmethod(PredictionServiceClient.parse_endpoint_path) - model_path = staticmethod(PredictionServiceClient.model_path) - parse_model_path = staticmethod(PredictionServiceClient.parse_model_path) - common_billing_account_path = staticmethod(PredictionServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(PredictionServiceClient.parse_common_billing_account_path) - common_folder_path = 
staticmethod(PredictionServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(PredictionServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(PredictionServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(PredictionServiceClient.parse_common_organization_path) - common_project_path = staticmethod(PredictionServiceClient.common_project_path) - parse_common_project_path = staticmethod(PredictionServiceClient.parse_common_project_path) - common_location_path = staticmethod(PredictionServiceClient.common_location_path) - parse_common_location_path = staticmethod(PredictionServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PredictionServiceAsyncClient: The constructed client. - """ - return PredictionServiceClient.from_service_account_info.__func__(PredictionServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PredictionServiceAsyncClient: The constructed client. 
- """ - return PredictionServiceClient.from_service_account_file.__func__(PredictionServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> PredictionServiceTransport: - """Returns the transport used by the client instance. - - Returns: - PredictionServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(PredictionServiceClient).get_transport_class, type(PredictionServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, PredictionServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the prediction service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.PredictionServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. 
- (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = PredictionServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def predict(self, - request: Union[prediction_service.PredictRequest, dict] = None, - *, - endpoint: str = None, - instances: Sequence[struct_pb2.Value] = None, - parameters: struct_pb2.Value = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> prediction_service.PredictResponse: - r"""Perform an online prediction. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.PredictRequest, dict]): - The request object. Request message for - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. - endpoint (:class:`str`): - Required. The name of the Endpoint requested to serve - the prediction. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - instances (:class:`Sequence[google.protobuf.struct_pb2.Value]`): - Required. The instances that are the input to the - prediction call. A DeployedModel may have an upper limit - on the number of instances it supports per request, and - when it is exceeded the prediction call errors in case - of AutoML Models, or, in case of customer created - Models, the behaviour is as documented by that Model. 
- The schema of any single instance may be specified via - Endpoint's DeployedModels' - [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. - - This corresponds to the ``instances`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - parameters (:class:`google.protobuf.struct_pb2.Value`): - The parameters that govern the prediction. The schema of - the parameters may be specified via Endpoint's - DeployedModels' [Model's - ][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. - - This corresponds to the ``parameters`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.PredictResponse: - Response message for - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([endpoint, instances, parameters]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = prediction_service.PredictRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if parameters is not None: - request.parameters = parameters - if instances: - request.instances.extend(instances) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.predict, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def raw_predict(self, - request: Union[prediction_service.RawPredictRequest, dict] = None, - *, - endpoint: str = None, - http_body: httpbody_pb2.HttpBody = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> httpbody_pb2.HttpBody: - r"""Perform an online prediction with an arbitrary HTTP payload. - - The response includes the following HTTP headers: - - - ``X-Vertex-AI-Endpoint-Id``: ID of the - [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that - served this prediction. - - - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's - [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] - that served this prediction. 
- - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.RawPredictRequest, dict]): - The request object. Request message for - [PredictionService.RawPredict][google.cloud.aiplatform.v1beta1.PredictionService.RawPredict]. - endpoint (:class:`str`): - Required. The name of the Endpoint requested to serve - the prediction. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - http_body (:class:`google.api.httpbody_pb2.HttpBody`): - The prediction input. Supports HTTP headers and - arbitrary data payload. - - A - [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] - may have an upper limit on the number of instances it - supports per request. When this limit it is exceeded for - an AutoML model, the - [RawPredict][google.cloud.aiplatform.v1beta1.PredictionService.RawPredict] - method returns an error. When this limit is exceeded for - a custom-trained model, the behavior varies depending on - the model. - - You can specify the schema for each instance in the - [predict_schemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] - field when you create a - [Model][google.cloud.aiplatform.v1beta1.Model]. This - schema applies when you deploy the ``Model`` as a - ``DeployedModel`` to an - [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] and - use the ``RawPredict`` method. - - This corresponds to the ``http_body`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api.httpbody_pb2.HttpBody: - Message that represents an arbitrary HTTP body. 
It should only be used for - payload formats that can't be represented as JSON, - such as raw binary or an HTML page. - - This message can be used both in streaming and - non-streaming API methods in the request as well as - the response. - - It can be used as a top-level request field, which is - convenient if one wants to extract parameters from - either the URL or HTTP template into the request - fields and also want access to the raw HTTP body. - - Example: - - message GetResourceRequest { - // A unique request id. string request_id = 1; - - // The raw HTTP body is bound to this field. - google.api.HttpBody http_body = 2; - - } - - service ResourceService { - rpc GetResource(GetResourceRequest) - returns (google.api.HttpBody); - - rpc UpdateResource(google.api.HttpBody) - returns (google.protobuf.Empty); - - } - - Example with streaming methods: - - service CaldavService { - rpc GetCalendar(stream google.api.HttpBody) - returns (stream google.api.HttpBody); - - rpc UpdateCalendar(stream google.api.HttpBody) - returns (stream google.api.HttpBody); - - } - - Use of this type only changes how the request and - response bodies are handled, all other features will - continue to work unchanged. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, http_body]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = prediction_service.RawPredictRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if http_body is not None: - request.http_body = http_body - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.raw_predict, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def explain(self, - request: Union[prediction_service.ExplainRequest, dict] = None, - *, - endpoint: str = None, - instances: Sequence[struct_pb2.Value] = None, - parameters: struct_pb2.Value = None, - deployed_model_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> prediction_service.ExplainResponse: - r"""Perform an online explanation. - - If - [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] - is specified, the corresponding DeployModel must have - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] - populated. If - [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] - is not specified, all DeployedModels must have - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] - populated. Only deployed AutoML tabular Models have - explanation_spec. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ExplainRequest, dict]): - The request object. Request message for - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. - endpoint (:class:`str`): - Required. The name of the Endpoint requested to serve - the explanation. 
Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - instances (:class:`Sequence[google.protobuf.struct_pb2.Value]`): - Required. The instances that are the input to the - explanation call. A DeployedModel may have an upper - limit on the number of instances it supports per - request, and when it is exceeded the explanation call - errors in case of AutoML Models, or, in case of customer - created Models, the behaviour is as documented by that - Model. The schema of any single instance may be - specified via Endpoint's DeployedModels' - [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. - - This corresponds to the ``instances`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - parameters (:class:`google.protobuf.struct_pb2.Value`): - The parameters that govern the prediction. The schema of - the parameters may be specified via Endpoint's - DeployedModels' [Model's - ][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. - - This corresponds to the ``parameters`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_model_id (:class:`str`): - If specified, this ExplainRequest will be served by the - chosen DeployedModel, overriding - [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split]. - - This corresponds to the ``deployed_model_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.ExplainResponse: - Response message for - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, instances, parameters, deployed_model_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = prediction_service.ExplainRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if parameters is not None: - request.parameters = parameters - if deployed_model_id is not None: - request.deployed_model_id = deployed_model_id - if instances: - request.instances.extend(instances) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.explain, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "PredictionServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py deleted file mode 100644 index 22c0516177..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py +++ /dev/null @@ -1,781 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api import httpbody_pb2 # type: ignore -from google.cloud.aiplatform_v1beta1.types import explanation -from google.cloud.aiplatform_v1beta1.types import prediction_service -from google.protobuf import any_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import PredictionServiceGrpcTransport -from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport - - -class PredictionServiceClientMeta(type): - """Metaclass for the PredictionService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. 
- """ - _transport_registry = OrderedDict() # type: Dict[str, Type[PredictionServiceTransport]] - _transport_registry["grpc"] = PredictionServiceGrpcTransport - _transport_registry["grpc_asyncio"] = PredictionServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[PredictionServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class PredictionServiceClient(metaclass=PredictionServiceClientMeta): - """A service for online predictions and explanations.""" - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
- ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PredictionServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - PredictionServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> PredictionServiceTransport: - """Returns the transport used by the client instance. - - Returns: - PredictionServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def endpoint_path(project: str,location: str,endpoint: str,) -> str: - """Returns a fully-qualified endpoint string.""" - return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - - @staticmethod - def parse_endpoint_path(path: str) -> Dict[str,str]: - """Parses a endpoint path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/endpoints/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def model_path(project: str,location: str,model: str,) -> str: - """Returns a fully-qualified model string.""" - return "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - - @staticmethod - def parse_model_path(path: str) -> Dict[str,str]: - """Parses a model path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/models/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a 
fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, PredictionServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the prediction service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, PredictionServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. 
- client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. 
- if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, PredictionServiceTransport): - # transport is a PredictionServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def predict(self, - request: Union[prediction_service.PredictRequest, dict] = None, - *, - endpoint: str = None, - instances: Sequence[struct_pb2.Value] = None, - parameters: struct_pb2.Value = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> prediction_service.PredictResponse: - r"""Perform an online prediction. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.PredictRequest, dict]): - The request object. Request message for - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. - endpoint (str): - Required. The name of the Endpoint requested to serve - the prediction. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - instances (Sequence[google.protobuf.struct_pb2.Value]): - Required. The instances that are the input to the - prediction call. A DeployedModel may have an upper limit - on the number of instances it supports per request, and - when it is exceeded the prediction call errors in case - of AutoML Models, or, in case of customer created - Models, the behaviour is as documented by that Model. 
- The schema of any single instance may be specified via - Endpoint's DeployedModels' - [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. - - This corresponds to the ``instances`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - parameters (google.protobuf.struct_pb2.Value): - The parameters that govern the prediction. The schema of - the parameters may be specified via Endpoint's - DeployedModels' [Model's - ][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. - - This corresponds to the ``parameters`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.PredictResponse: - Response message for - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, instances, parameters]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a prediction_service.PredictRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, prediction_service.PredictRequest): - request = prediction_service.PredictRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if instances is not None: - request.instances.extend(instances) - if parameters is not None: - request.parameters = parameters - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.predict] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def raw_predict(self, - request: Union[prediction_service.RawPredictRequest, dict] = None, - *, - endpoint: str = None, - http_body: httpbody_pb2.HttpBody = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> httpbody_pb2.HttpBody: - r"""Perform an online prediction with an arbitrary HTTP payload. - - The response includes the following HTTP headers: - - - ``X-Vertex-AI-Endpoint-Id``: ID of the - [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that - served this prediction. - - - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's - [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] - that served this prediction. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.RawPredictRequest, dict]): - The request object. Request message for - [PredictionService.RawPredict][google.cloud.aiplatform.v1beta1.PredictionService.RawPredict]. 
- endpoint (str): - Required. The name of the Endpoint requested to serve - the prediction. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - http_body (google.api.httpbody_pb2.HttpBody): - The prediction input. Supports HTTP headers and - arbitrary data payload. - - A - [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] - may have an upper limit on the number of instances it - supports per request. When this limit it is exceeded for - an AutoML model, the - [RawPredict][google.cloud.aiplatform.v1beta1.PredictionService.RawPredict] - method returns an error. When this limit is exceeded for - a custom-trained model, the behavior varies depending on - the model. - - You can specify the schema for each instance in the - [predict_schemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] - field when you create a - [Model][google.cloud.aiplatform.v1beta1.Model]. This - schema applies when you deploy the ``Model`` as a - ``DeployedModel`` to an - [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] and - use the ``RawPredict`` method. - - This corresponds to the ``http_body`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api.httpbody_pb2.HttpBody: - Message that represents an arbitrary HTTP body. It should only be used for - payload formats that can't be represented as JSON, - such as raw binary or an HTML page. - - This message can be used both in streaming and - non-streaming API methods in the request as well as - the response. 
- - It can be used as a top-level request field, which is - convenient if one wants to extract parameters from - either the URL or HTTP template into the request - fields and also want access to the raw HTTP body. - - Example: - - message GetResourceRequest { - // A unique request id. string request_id = 1; - - // The raw HTTP body is bound to this field. - google.api.HttpBody http_body = 2; - - } - - service ResourceService { - rpc GetResource(GetResourceRequest) - returns (google.api.HttpBody); - - rpc UpdateResource(google.api.HttpBody) - returns (google.protobuf.Empty); - - } - - Example with streaming methods: - - service CaldavService { - rpc GetCalendar(stream google.api.HttpBody) - returns (stream google.api.HttpBody); - - rpc UpdateCalendar(stream google.api.HttpBody) - returns (stream google.api.HttpBody); - - } - - Use of this type only changes how the request and - response bodies are handled, all other features will - continue to work unchanged. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, http_body]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a prediction_service.RawPredictRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, prediction_service.RawPredictRequest): - request = prediction_service.RawPredictRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if endpoint is not None: - request.endpoint = endpoint - if http_body is not None: - request.http_body = http_body - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.raw_predict] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def explain(self, - request: Union[prediction_service.ExplainRequest, dict] = None, - *, - endpoint: str = None, - instances: Sequence[struct_pb2.Value] = None, - parameters: struct_pb2.Value = None, - deployed_model_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> prediction_service.ExplainResponse: - r"""Perform an online explanation. - - If - [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] - is specified, the corresponding DeployModel must have - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] - populated. If - [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] - is not specified, all DeployedModels must have - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] - populated. Only deployed AutoML tabular Models have - explanation_spec. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ExplainRequest, dict]): - The request object. Request message for - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. - endpoint (str): - Required. The name of the Endpoint requested to serve - the explanation. 
Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - - This corresponds to the ``endpoint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - instances (Sequence[google.protobuf.struct_pb2.Value]): - Required. The instances that are the input to the - explanation call. A DeployedModel may have an upper - limit on the number of instances it supports per - request, and when it is exceeded the explanation call - errors in case of AutoML Models, or, in case of customer - created Models, the behaviour is as documented by that - Model. The schema of any single instance may be - specified via Endpoint's DeployedModels' - [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. - - This corresponds to the ``instances`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - parameters (google.protobuf.struct_pb2.Value): - The parameters that govern the prediction. The schema of - the parameters may be specified via Endpoint's - DeployedModels' [Model's - ][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. - - This corresponds to the ``parameters`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - deployed_model_id (str): - If specified, this ExplainRequest will be served by the - chosen DeployedModel, overriding - [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split]. - - This corresponds to the ``deployed_model_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.ExplainResponse: - Response message for - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([endpoint, instances, parameters, deployed_model_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a prediction_service.ExplainRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, prediction_service.ExplainRequest): - request = prediction_service.ExplainRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if endpoint is not None: - request.endpoint = endpoint - if instances is not None: - request.instances.extend(instances) - if parameters is not None: - request.parameters = parameters - if deployed_model_id is not None: - request.deployed_model_id = deployed_model_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.explain] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("endpoint", request.endpoint), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! - """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "PredictionServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/__init__.py deleted file mode 100644 index d747de2ce9..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -from typing import Dict, Type - -from .base import PredictionServiceTransport -from .grpc import PredictionServiceGrpcTransport -from .grpc_asyncio import PredictionServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[PredictionServiceTransport]] -_transport_registry['grpc'] = PredictionServiceGrpcTransport -_transport_registry['grpc_asyncio'] = PredictionServiceGrpcAsyncIOTransport - -__all__ = ( - 'PredictionServiceTransport', - 'PredictionServiceGrpcTransport', - 'PredictionServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py deleted file mode 100644 index 4512613a4e..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py +++ /dev/null @@ -1,175 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.api import httpbody_pb2 # type: ignore -from google.cloud.aiplatform_v1beta1.types import prediction_service - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class PredictionServiceTransport(abc.ABC): - """Abstract transport class for PredictionService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. 
- This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
- self._wrapped_methods = { - self.predict: gapic_v1.method.wrap_method( - self.predict, - default_timeout=5.0, - client_info=client_info, - ), - self.raw_predict: gapic_v1.method.wrap_method( - self.raw_predict, - default_timeout=None, - client_info=client_info, - ), - self.explain: gapic_v1.method.wrap_method( - self.explain, - default_timeout=5.0, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! - """ - raise NotImplementedError() - - @property - def predict(self) -> Callable[ - [prediction_service.PredictRequest], - Union[ - prediction_service.PredictResponse, - Awaitable[prediction_service.PredictResponse] - ]]: - raise NotImplementedError() - - @property - def raw_predict(self) -> Callable[ - [prediction_service.RawPredictRequest], - Union[ - httpbody_pb2.HttpBody, - Awaitable[httpbody_pb2.HttpBody] - ]]: - raise NotImplementedError() - - @property - def explain(self) -> Callable[ - [prediction_service.ExplainRequest], - Union[ - prediction_service.ExplainResponse, - Awaitable[prediction_service.ExplainResponse] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'PredictionServiceTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py deleted file mode 100644 index f1de18aaaa..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py +++ /dev/null @@ -1,328 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.api import httpbody_pb2 # type: ignore -from google.cloud.aiplatform_v1beta1.types import prediction_service -from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO - - -class PredictionServiceGrpcTransport(PredictionServiceTransport): - """gRPC backend transport for PredictionService. - - A service for online predictions and explanations. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. 
- ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def predict(self) -> Callable[ - [prediction_service.PredictRequest], - prediction_service.PredictResponse]: - r"""Return a callable for the predict method over gRPC. - - Perform an online prediction. - - Returns: - Callable[[~.PredictRequest], - ~.PredictResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'predict' not in self._stubs: - self._stubs['predict'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PredictionService/Predict', - request_serializer=prediction_service.PredictRequest.serialize, - response_deserializer=prediction_service.PredictResponse.deserialize, - ) - return self._stubs['predict'] - - @property - def raw_predict(self) -> Callable[ - [prediction_service.RawPredictRequest], - httpbody_pb2.HttpBody]: - r"""Return a callable for the raw predict method over gRPC. - - Perform an online prediction with an arbitrary HTTP payload. - - The response includes the following HTTP headers: - - - ``X-Vertex-AI-Endpoint-Id``: ID of the - [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that - served this prediction. - - - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's - [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] - that served this prediction. - - Returns: - Callable[[~.RawPredictRequest], - ~.HttpBody]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'raw_predict' not in self._stubs: - self._stubs['raw_predict'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PredictionService/RawPredict', - request_serializer=prediction_service.RawPredictRequest.serialize, - response_deserializer=httpbody_pb2.HttpBody.FromString, - ) - return self._stubs['raw_predict'] - - @property - def explain(self) -> Callable[ - [prediction_service.ExplainRequest], - prediction_service.ExplainResponse]: - r"""Return a callable for the explain method over gRPC. - - Perform an online explanation. 
- - If - [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] - is specified, the corresponding DeployModel must have - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] - populated. If - [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] - is not specified, all DeployedModels must have - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] - populated. Only deployed AutoML tabular Models have - explanation_spec. - - Returns: - Callable[[~.ExplainRequest], - ~.ExplainResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'explain' not in self._stubs: - self._stubs['explain'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PredictionService/Explain', - request_serializer=prediction_service.ExplainRequest.serialize, - response_deserializer=prediction_service.ExplainResponse.deserialize, - ) - return self._stubs['explain'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'PredictionServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py deleted file mode 100644 index c3f92425a9..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,332 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.api import httpbody_pb2 # type: ignore -from google.cloud.aiplatform_v1beta1.types import prediction_service -from .base import PredictionServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import PredictionServiceGrpcTransport - - -class PredictionServiceGrpcAsyncIOTransport(PredictionServiceTransport): - """gRPC AsyncIO backend transport for PredictionService. - - A service for online predictions and explanations. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. 
- Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. 
- credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. 
If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def predict(self) -> Callable[ - [prediction_service.PredictRequest], - Awaitable[prediction_service.PredictResponse]]: - r"""Return a callable for the predict method over gRPC. - - Perform an online prediction. - - Returns: - Callable[[~.PredictRequest], - Awaitable[~.PredictResponse]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'predict' not in self._stubs: - self._stubs['predict'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PredictionService/Predict', - request_serializer=prediction_service.PredictRequest.serialize, - response_deserializer=prediction_service.PredictResponse.deserialize, - ) - return self._stubs['predict'] - - @property - def raw_predict(self) -> Callable[ - [prediction_service.RawPredictRequest], - Awaitable[httpbody_pb2.HttpBody]]: - r"""Return a callable for the raw predict method over gRPC. - - Perform an online prediction with an arbitrary HTTP payload. - - The response includes the following HTTP headers: - - - ``X-Vertex-AI-Endpoint-Id``: ID of the - [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] that - served this prediction. - - - ``X-Vertex-AI-Deployed-Model-Id``: ID of the Endpoint's - [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] - that served this prediction. - - Returns: - Callable[[~.RawPredictRequest], - Awaitable[~.HttpBody]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'raw_predict' not in self._stubs: - self._stubs['raw_predict'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PredictionService/RawPredict', - request_serializer=prediction_service.RawPredictRequest.serialize, - response_deserializer=httpbody_pb2.HttpBody.FromString, - ) - return self._stubs['raw_predict'] - - @property - def explain(self) -> Callable[ - [prediction_service.ExplainRequest], - Awaitable[prediction_service.ExplainResponse]]: - r"""Return a callable for the explain method over gRPC. - - Perform an online explanation. - - If - [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] - is specified, the corresponding DeployModel must have - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] - populated. If - [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] - is not specified, all DeployedModels must have - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] - populated. Only deployed AutoML tabular Models have - explanation_spec. - - Returns: - Callable[[~.ExplainRequest], - Awaitable[~.ExplainResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'explain' not in self._stubs: - self._stubs['explain'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.PredictionService/Explain', - request_serializer=prediction_service.ExplainRequest.serialize, - response_deserializer=prediction_service.ExplainResponse.deserialize, - ) - return self._stubs['explain'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'PredictionServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/__init__.py deleted file mode 100644 index 04af59e5fa..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import SpecialistPoolServiceClient -from .async_client import SpecialistPoolServiceAsyncClient - -__all__ = ( - 'SpecialistPoolServiceClient', - 'SpecialistPoolServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py deleted file mode 100644 index 6f872f370b..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/async_client.py +++ /dev/null @@ -1,658 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import pagers -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.cloud.aiplatform_v1beta1.types import specialist_pool -from google.cloud.aiplatform_v1beta1.types import specialist_pool as gca_specialist_pool -from google.cloud.aiplatform_v1beta1.types import specialist_pool_service -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from .transports.base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport -from .client import SpecialistPoolServiceClient - - -class SpecialistPoolServiceAsyncClient: - """A service for creating and managing Customer SpecialistPools. - When customers start Data Labeling jobs, they can reuse/create - Specialist Pools to bring their own Specialists to label the - data. Customers can add/remove Managers for the Specialist Pool - on Cloud console, then Managers will get email notifications to - manage Specialists and tasks on CrowdCompute console. 
- """ - - _client: SpecialistPoolServiceClient - - DEFAULT_ENDPOINT = SpecialistPoolServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = SpecialistPoolServiceClient.DEFAULT_MTLS_ENDPOINT - - specialist_pool_path = staticmethod(SpecialistPoolServiceClient.specialist_pool_path) - parse_specialist_pool_path = staticmethod(SpecialistPoolServiceClient.parse_specialist_pool_path) - common_billing_account_path = staticmethod(SpecialistPoolServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(SpecialistPoolServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(SpecialistPoolServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(SpecialistPoolServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(SpecialistPoolServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(SpecialistPoolServiceClient.parse_common_organization_path) - common_project_path = staticmethod(SpecialistPoolServiceClient.common_project_path) - parse_common_project_path = staticmethod(SpecialistPoolServiceClient.parse_common_project_path) - common_location_path = staticmethod(SpecialistPoolServiceClient.common_location_path) - parse_common_location_path = staticmethod(SpecialistPoolServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - SpecialistPoolServiceAsyncClient: The constructed client. 
- """ - return SpecialistPoolServiceClient.from_service_account_info.__func__(SpecialistPoolServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - SpecialistPoolServiceAsyncClient: The constructed client. - """ - return SpecialistPoolServiceClient.from_service_account_file.__func__(SpecialistPoolServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> SpecialistPoolServiceTransport: - """Returns the transport used by the client instance. - - Returns: - SpecialistPoolServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(SpecialistPoolServiceClient).get_transport_class, type(SpecialistPoolServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, SpecialistPoolServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the specialist pool service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.SpecialistPoolServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. 
- client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = SpecialistPoolServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_specialist_pool(self, - request: Union[specialist_pool_service.CreateSpecialistPoolRequest, dict] = None, - *, - parent: str = None, - specialist_pool: gca_specialist_pool.SpecialistPool = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a SpecialistPool. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateSpecialistPoolRequest, dict]): - The request object. Request message for - [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool]. - parent (:class:`str`): - Required. 
The parent Project name for the new - SpecialistPool. The form is - ``projects/{project}/locations/{location}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - specialist_pool (:class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool`): - Required. The SpecialistPool to - create. - - This corresponds to the ``specialist_pool`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data - labeling jobs. It includes a group of specialist - managers and workers. Managers are responsible for - managing the workers in this pool as well as - customers' data labeling jobs associated with this - pool. Customers create specialist pool as well as - start data labeling jobs on Cloud, managers and - workers handle the jobs using CrowdCompute console. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, specialist_pool]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = specialist_pool_service.CreateSpecialistPoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - if specialist_pool is not None: - request.specialist_pool = specialist_pool - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_specialist_pool, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_specialist_pool.SpecialistPool, - metadata_type=specialist_pool_service.CreateSpecialistPoolOperationMetadata, - ) - - # Done; return the response. - return response - - async def get_specialist_pool(self, - request: Union[specialist_pool_service.GetSpecialistPoolRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> specialist_pool.SpecialistPool: - r"""Gets a SpecialistPool. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetSpecialistPoolRequest, dict]): - The request object. Request message for - [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool]. - name (:class:`str`): - Required. The name of the SpecialistPool resource. The - form is - ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.SpecialistPool: - SpecialistPool represents customers' - own workforce to work on their data - labeling jobs. It includes a group of - specialist managers and workers. - Managers are responsible for managing - the workers in this pool as well as - customers' data labeling jobs associated - with this pool. Customers create - specialist pool as well as start data - labeling jobs on Cloud, managers and - workers handle the jobs using - CrowdCompute console. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = specialist_pool_service.GetSpecialistPoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_specialist_pool, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def list_specialist_pools(self, - request: Union[specialist_pool_service.ListSpecialistPoolsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListSpecialistPoolsAsyncPager: - r"""Lists SpecialistPools in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest, dict]): - The request object. Request message for - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. - parent (:class:`str`): - Required. The name of the SpecialistPool's parent - resource. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.specialist_pool_service.pagers.ListSpecialistPoolsAsyncPager: - Response message for - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = specialist_pool_service.ListSpecialistPoolsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_specialist_pools, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListSpecialistPoolsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_specialist_pool(self, - request: Union[specialist_pool_service.DeleteSpecialistPoolRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a SpecialistPool as well as all Specialists - in the pool. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteSpecialistPoolRequest, dict]): - The request object. Request message for - [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool]. - name (:class:`str`): - Required. 
The resource name of the SpecialistPool to - delete. Format: - ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = specialist_pool_service.DeleteSpecialistPoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_specialist_pool, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def update_specialist_pool(self, - request: Union[specialist_pool_service.UpdateSpecialistPoolRequest, dict] = None, - *, - specialist_pool: gca_specialist_pool.SpecialistPool = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Updates a SpecialistPool. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateSpecialistPoolRequest, dict]): - The request object. Request message for - [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool]. - specialist_pool (:class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool`): - Required. The SpecialistPool which - replaces the resource on the server. - - This corresponds to the ``specialist_pool`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The update mask applies to - the resource. 
- - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data - labeling jobs. It includes a group of specialist - managers and workers. Managers are responsible for - managing the workers in this pool as well as - customers' data labeling jobs associated with this - pool. Customers create specialist pool as well as - start data labeling jobs on Cloud, managers and - workers handle the jobs using CrowdCompute console. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([specialist_pool, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = specialist_pool_service.UpdateSpecialistPoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if specialist_pool is not None: - request.specialist_pool = specialist_pool - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_specialist_pool, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("specialist_pool.name", request.specialist_pool.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_specialist_pool.SpecialistPool, - metadata_type=specialist_pool_service.UpdateSpecialistPoolOperationMetadata, - ) - - # Done; return the response. - return response - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "SpecialistPoolServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py deleted file mode 100644 index e103cc455c..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/client.py +++ /dev/null @@ -1,856 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import pagers -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.cloud.aiplatform_v1beta1.types import specialist_pool -from google.cloud.aiplatform_v1beta1.types import specialist_pool as gca_specialist_pool -from google.cloud.aiplatform_v1beta1.types import specialist_pool_service -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from .transports.base import SpecialistPoolServiceTransport, 
DEFAULT_CLIENT_INFO -from .transports.grpc import SpecialistPoolServiceGrpcTransport -from .transports.grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport - - -class SpecialistPoolServiceClientMeta(type): - """Metaclass for the SpecialistPoolService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[SpecialistPoolServiceTransport]] - _transport_registry["grpc"] = SpecialistPoolServiceGrpcTransport - _transport_registry["grpc_asyncio"] = SpecialistPoolServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[SpecialistPoolServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class SpecialistPoolServiceClient(metaclass=SpecialistPoolServiceClientMeta): - """A service for creating and managing Customer SpecialistPools. - When customers start Data Labeling jobs, they can reuse/create - Specialist Pools to bring their own Specialists to label the - data. Customers can add/remove Managers for the Specialist Pool - on Cloud console, then Managers will get email notifications to - manage Specialists and tasks on CrowdCompute console. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. 
- Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - SpecialistPoolServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - SpecialistPoolServiceClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> SpecialistPoolServiceTransport: - """Returns the transport used by the client instance. - - Returns: - SpecialistPoolServiceTransport: The transport used by the client - instance. - """ - return self._transport - - @staticmethod - def specialist_pool_path(project: str,location: str,specialist_pool: str,) -> str: - """Returns a fully-qualified specialist_pool string.""" - return "projects/{project}/locations/{location}/specialistPools/{specialist_pool}".format(project=project, location=location, specialist_pool=specialist_pool, ) - - @staticmethod - def parse_specialist_pool_path(path: str) -> Dict[str,str]: - """Parses a specialist_pool path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/specialistPools/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns 
a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, SpecialistPoolServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the specialist pool service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, SpecialistPoolServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. 
- client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. 
- if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, SpecialistPoolServiceTransport): - # transport is a SpecialistPoolServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def create_specialist_pool(self, - request: Union[specialist_pool_service.CreateSpecialistPoolRequest, dict] = None, - *, - parent: str = None, - specialist_pool: gca_specialist_pool.SpecialistPool = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates a SpecialistPool. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateSpecialistPoolRequest, dict]): - The request object. Request message for - [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool]. - parent (str): - Required. The parent Project name for the new - SpecialistPool. The form is - ``projects/{project}/locations/{location}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - specialist_pool (google.cloud.aiplatform_v1beta1.types.SpecialistPool): - Required. The SpecialistPool to - create. - - This corresponds to the ``specialist_pool`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
- - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data - labeling jobs. It includes a group of specialist - managers and workers. Managers are responsible for - managing the workers in this pool as well as - customers' data labeling jobs associated with this - pool. Customers create specialist pool as well as - start data labeling jobs on Cloud, managers and - workers handle the jobs using CrowdCompute console. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, specialist_pool]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a specialist_pool_service.CreateSpecialistPoolRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, specialist_pool_service.CreateSpecialistPoolRequest): - request = specialist_pool_service.CreateSpecialistPoolRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if specialist_pool is not None: - request.specialist_pool = specialist_pool - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_specialist_pool] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_specialist_pool.SpecialistPool, - metadata_type=specialist_pool_service.CreateSpecialistPoolOperationMetadata, - ) - - # Done; return the response. - return response - - def get_specialist_pool(self, - request: Union[specialist_pool_service.GetSpecialistPoolRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> specialist_pool.SpecialistPool: - r"""Gets a SpecialistPool. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetSpecialistPoolRequest, dict]): - The request object. Request message for - [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool]. - name (str): - Required. The name of the SpecialistPool resource. The - form is - ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.SpecialistPool: - SpecialistPool represents customers' - own workforce to work on their data - labeling jobs. It includes a group of - specialist managers and workers. - Managers are responsible for managing - the workers in this pool as well as - customers' data labeling jobs associated - with this pool. 
Customers create - specialist pool as well as start data - labeling jobs on Cloud, managers and - workers handle the jobs using - CrowdCompute console. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a specialist_pool_service.GetSpecialistPoolRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, specialist_pool_service.GetSpecialistPoolRequest): - request = specialist_pool_service.GetSpecialistPoolRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_specialist_pool] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_specialist_pools(self, - request: Union[specialist_pool_service.ListSpecialistPoolsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListSpecialistPoolsPager: - r"""Lists SpecialistPools in a Location. 
- - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest, dict]): - The request object. Request message for - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. - parent (str): - Required. The name of the SpecialistPool's parent - resource. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.specialist_pool_service.pagers.ListSpecialistPoolsPager: - Response message for - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a specialist_pool_service.ListSpecialistPoolsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, specialist_pool_service.ListSpecialistPoolsRequest): - request = specialist_pool_service.ListSpecialistPoolsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_specialist_pools] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListSpecialistPoolsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_specialist_pool(self, - request: Union[specialist_pool_service.DeleteSpecialistPoolRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a SpecialistPool as well as all Specialists - in the pool. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteSpecialistPoolRequest, dict]): - The request object. Request message for - [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool]. - name (str): - Required. The resource name of the SpecialistPool to - delete. 
Format: - ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a specialist_pool_service.DeleteSpecialistPoolRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, specialist_pool_service.DeleteSpecialistPoolRequest): - request = specialist_pool_service.DeleteSpecialistPoolRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_specialist_pool] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def update_specialist_pool(self, - request: Union[specialist_pool_service.UpdateSpecialistPoolRequest, dict] = None, - *, - specialist_pool: gca_specialist_pool.SpecialistPool = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Updates a SpecialistPool. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateSpecialistPoolRequest, dict]): - The request object. Request message for - [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool]. - specialist_pool (google.cloud.aiplatform_v1beta1.types.SpecialistPool): - Required. The SpecialistPool which - replaces the resource on the server. - - This corresponds to the ``specialist_pool`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to - the resource. 
- - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.SpecialistPool` SpecialistPool represents customers' own workforce to work on their data - labeling jobs. It includes a group of specialist - managers and workers. Managers are responsible for - managing the workers in this pool as well as - customers' data labeling jobs associated with this - pool. Customers create specialist pool as well as - start data labeling jobs on Cloud, managers and - workers handle the jobs using CrowdCompute console. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([specialist_pool, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a specialist_pool_service.UpdateSpecialistPoolRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, specialist_pool_service.UpdateSpecialistPoolRequest): - request = specialist_pool_service.UpdateSpecialistPoolRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if specialist_pool is not None: - request.specialist_pool = specialist_pool - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_specialist_pool] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("specialist_pool.name", request.specialist_pool.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_specialist_pool.SpecialistPool, - metadata_type=specialist_pool_service.UpdateSpecialistPoolOperationMetadata, - ) - - # Done; return the response. - return response - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! 
- """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "SpecialistPoolServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/pagers.py deleted file mode 100644 index e3f7e28697..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/pagers.py +++ /dev/null @@ -1,141 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.aiplatform_v1beta1.types import specialist_pool -from google.cloud.aiplatform_v1beta1.types import specialist_pool_service - - -class ListSpecialistPoolsPager: - """A pager for iterating through ``list_specialist_pools`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``specialist_pools`` field. 
- - If there are more pages, the ``__iter__`` method will make additional - ``ListSpecialistPools`` requests and continue to iterate - through the ``specialist_pools`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., specialist_pool_service.ListSpecialistPoolsResponse], - request: specialist_pool_service.ListSpecialistPoolsRequest, - response: specialist_pool_service.ListSpecialistPoolsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = specialist_pool_service.ListSpecialistPoolsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[specialist_pool_service.ListSpecialistPoolsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[specialist_pool.SpecialistPool]: - for page in self.pages: - yield from page.specialist_pools - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListSpecialistPoolsAsyncPager: - """A pager for iterating through ``list_specialist_pools`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``specialist_pools`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListSpecialistPools`` requests and continue to iterate - through the ``specialist_pools`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[specialist_pool_service.ListSpecialistPoolsResponse]], - request: specialist_pool_service.ListSpecialistPoolsRequest, - response: specialist_pool_service.ListSpecialistPoolsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. 
- - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = specialist_pool_service.ListSpecialistPoolsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[specialist_pool_service.ListSpecialistPoolsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[specialist_pool.SpecialistPool]: - async def async_generator(): - async for page in self.pages: - for response in page.specialist_pools: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/__init__.py deleted file mode 100644 index ba8c9d7eb5..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import SpecialistPoolServiceTransport -from .grpc import SpecialistPoolServiceGrpcTransport -from .grpc_asyncio import SpecialistPoolServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[SpecialistPoolServiceTransport]] -_transport_registry['grpc'] = SpecialistPoolServiceGrpcTransport -_transport_registry['grpc_asyncio'] = SpecialistPoolServiceGrpcAsyncIOTransport - -__all__ = ( - 'SpecialistPoolServiceTransport', - 'SpecialistPoolServiceGrpcTransport', - 'SpecialistPoolServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py deleted file mode 100644 index c6551c3ca5..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/base.py +++ /dev/null @@ -1,210 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import specialist_pool -from google.cloud.aiplatform_v1beta1.types import specialist_pool_service -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class SpecialistPoolServiceTransport(abc.ABC): - """Abstract transport class for SpecialistPoolService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate 
the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. 
- if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.create_specialist_pool: gapic_v1.method.wrap_method( - self.create_specialist_pool, - default_timeout=5.0, - client_info=client_info, - ), - self.get_specialist_pool: gapic_v1.method.wrap_method( - self.get_specialist_pool, - default_timeout=5.0, - client_info=client_info, - ), - self.list_specialist_pools: gapic_v1.method.wrap_method( - self.list_specialist_pools, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_specialist_pool: gapic_v1.method.wrap_method( - self.delete_specialist_pool, - default_timeout=5.0, - client_info=client_info, - ), - self.update_specialist_pool: gapic_v1.method.wrap_method( - self.update_specialist_pool, - default_timeout=5.0, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! 
- """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_specialist_pool(self) -> Callable[ - [specialist_pool_service.CreateSpecialistPoolRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_specialist_pool(self) -> Callable[ - [specialist_pool_service.GetSpecialistPoolRequest], - Union[ - specialist_pool.SpecialistPool, - Awaitable[specialist_pool.SpecialistPool] - ]]: - raise NotImplementedError() - - @property - def list_specialist_pools(self) -> Callable[ - [specialist_pool_service.ListSpecialistPoolsRequest], - Union[ - specialist_pool_service.ListSpecialistPoolsResponse, - Awaitable[specialist_pool_service.ListSpecialistPoolsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_specialist_pool(self) -> Callable[ - [specialist_pool_service.DeleteSpecialistPoolRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def update_specialist_pool(self) -> Callable[ - [specialist_pool_service.UpdateSpecialistPoolRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'SpecialistPoolServiceTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py deleted file mode 100644 index 099aef94cc..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc.py +++ /dev/null @@ -1,384 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the 
"License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1beta1.types import specialist_pool -from google.cloud.aiplatform_v1beta1.types import specialist_pool_service -from google.longrunning import operations_pb2 # type: ignore -from .base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO - - -class SpecialistPoolServiceGrpcTransport(SpecialistPoolServiceTransport): - """gRPC backend transport for SpecialistPoolService. - - A service for creating and managing Customer SpecialistPools. - When customers start Data Labeling jobs, they can reuse/create - Specialist Pools to bring their own Specialists to label the - data. Customers can add/remove Managers for the Specialist Pool - on Cloud console, then Managers will get email notifications to - manage Specialists and tasks on CrowdCompute console. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. 
- ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_specialist_pool(self) -> Callable[ - [specialist_pool_service.CreateSpecialistPoolRequest], - operations_pb2.Operation]: - r"""Return a callable for the create specialist pool method over gRPC. - - Creates a SpecialistPool. 
- - Returns: - Callable[[~.CreateSpecialistPoolRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_specialist_pool' not in self._stubs: - self._stubs['create_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/CreateSpecialistPool', - request_serializer=specialist_pool_service.CreateSpecialistPoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_specialist_pool'] - - @property - def get_specialist_pool(self) -> Callable[ - [specialist_pool_service.GetSpecialistPoolRequest], - specialist_pool.SpecialistPool]: - r"""Return a callable for the get specialist pool method over gRPC. - - Gets a SpecialistPool. - - Returns: - Callable[[~.GetSpecialistPoolRequest], - ~.SpecialistPool]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_specialist_pool' not in self._stubs: - self._stubs['get_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/GetSpecialistPool', - request_serializer=specialist_pool_service.GetSpecialistPoolRequest.serialize, - response_deserializer=specialist_pool.SpecialistPool.deserialize, - ) - return self._stubs['get_specialist_pool'] - - @property - def list_specialist_pools(self) -> Callable[ - [specialist_pool_service.ListSpecialistPoolsRequest], - specialist_pool_service.ListSpecialistPoolsResponse]: - r"""Return a callable for the list specialist pools method over gRPC. 
- - Lists SpecialistPools in a Location. - - Returns: - Callable[[~.ListSpecialistPoolsRequest], - ~.ListSpecialistPoolsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_specialist_pools' not in self._stubs: - self._stubs['list_specialist_pools'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/ListSpecialistPools', - request_serializer=specialist_pool_service.ListSpecialistPoolsRequest.serialize, - response_deserializer=specialist_pool_service.ListSpecialistPoolsResponse.deserialize, - ) - return self._stubs['list_specialist_pools'] - - @property - def delete_specialist_pool(self) -> Callable[ - [specialist_pool_service.DeleteSpecialistPoolRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete specialist pool method over gRPC. - - Deletes a SpecialistPool as well as all Specialists - in the pool. - - Returns: - Callable[[~.DeleteSpecialistPoolRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_specialist_pool' not in self._stubs: - self._stubs['delete_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/DeleteSpecialistPool', - request_serializer=specialist_pool_service.DeleteSpecialistPoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_specialist_pool'] - - @property - def update_specialist_pool(self) -> Callable[ - [specialist_pool_service.UpdateSpecialistPoolRequest], - operations_pb2.Operation]: - r"""Return a callable for the update specialist pool method over gRPC. - - Updates a SpecialistPool. - - Returns: - Callable[[~.UpdateSpecialistPoolRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_specialist_pool' not in self._stubs: - self._stubs['update_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/UpdateSpecialistPool', - request_serializer=specialist_pool_service.UpdateSpecialistPoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_specialist_pool'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'SpecialistPoolServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py deleted file mode 100644 index a03750bbf4..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,388 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 
2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import specialist_pool -from google.cloud.aiplatform_v1beta1.types import specialist_pool_service -from google.longrunning import operations_pb2 # type: ignore -from .base import SpecialistPoolServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import SpecialistPoolServiceGrpcTransport - - -class SpecialistPoolServiceGrpcAsyncIOTransport(SpecialistPoolServiceTransport): - """gRPC AsyncIO backend transport for SpecialistPoolService. - - A service for creating and managing Customer SpecialistPools. - When customers start Data Labeling jobs, they can reuse/create - Specialist Pools to bring their own Specialists to label the - data. Customers can add/remove Managers for the Specialist Pool - on Cloud console, then Managers will get email notifications to - manage Specialists and tasks on CrowdCompute console. 
- - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. 
- """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
- If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. 
This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_specialist_pool(self) -> Callable[ - [specialist_pool_service.CreateSpecialistPoolRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create specialist pool method over gRPC. - - Creates a SpecialistPool. - - Returns: - Callable[[~.CreateSpecialistPoolRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_specialist_pool' not in self._stubs: - self._stubs['create_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/CreateSpecialistPool', - request_serializer=specialist_pool_service.CreateSpecialistPoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_specialist_pool'] - - @property - def get_specialist_pool(self) -> Callable[ - [specialist_pool_service.GetSpecialistPoolRequest], - Awaitable[specialist_pool.SpecialistPool]]: - r"""Return a callable for the get specialist pool method over gRPC. - - Gets a SpecialistPool. - - Returns: - Callable[[~.GetSpecialistPoolRequest], - Awaitable[~.SpecialistPool]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_specialist_pool' not in self._stubs: - self._stubs['get_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/GetSpecialistPool', - request_serializer=specialist_pool_service.GetSpecialistPoolRequest.serialize, - response_deserializer=specialist_pool.SpecialistPool.deserialize, - ) - return self._stubs['get_specialist_pool'] - - @property - def list_specialist_pools(self) -> Callable[ - [specialist_pool_service.ListSpecialistPoolsRequest], - Awaitable[specialist_pool_service.ListSpecialistPoolsResponse]]: - r"""Return a callable for the list specialist pools method over gRPC. - - Lists SpecialistPools in a Location. - - Returns: - Callable[[~.ListSpecialistPoolsRequest], - Awaitable[~.ListSpecialistPoolsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_specialist_pools' not in self._stubs: - self._stubs['list_specialist_pools'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/ListSpecialistPools', - request_serializer=specialist_pool_service.ListSpecialistPoolsRequest.serialize, - response_deserializer=specialist_pool_service.ListSpecialistPoolsResponse.deserialize, - ) - return self._stubs['list_specialist_pools'] - - @property - def delete_specialist_pool(self) -> Callable[ - [specialist_pool_service.DeleteSpecialistPoolRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete specialist pool method over gRPC. - - Deletes a SpecialistPool as well as all Specialists - in the pool. - - Returns: - Callable[[~.DeleteSpecialistPoolRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_specialist_pool' not in self._stubs: - self._stubs['delete_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/DeleteSpecialistPool', - request_serializer=specialist_pool_service.DeleteSpecialistPoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_specialist_pool'] - - @property - def update_specialist_pool(self) -> Callable[ - [specialist_pool_service.UpdateSpecialistPoolRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the update specialist pool method over gRPC. - - Updates a SpecialistPool. 
- - Returns: - Callable[[~.UpdateSpecialistPoolRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_specialist_pool' not in self._stubs: - self._stubs['update_specialist_pool'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.SpecialistPoolService/UpdateSpecialistPool', - request_serializer=specialist_pool_service.UpdateSpecialistPoolRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_specialist_pool'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'SpecialistPoolServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/__init__.py deleted file mode 100644 index fa8edec482..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import TensorboardServiceClient -from .async_client import TensorboardServiceAsyncClient - -__all__ = ( - 'TensorboardServiceClient', - 'TensorboardServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py deleted file mode 100644 index 95961ef416..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/async_client.py +++ /dev/null @@ -1,2711 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, AsyncIterable, Awaitable, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.tensorboard_service import pagers -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.cloud.aiplatform_v1beta1.types import tensorboard -from google.cloud.aiplatform_v1beta1.types import tensorboard as gca_tensorboard -from google.cloud.aiplatform_v1beta1.types import tensorboard_data -from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment -from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment -from google.cloud.aiplatform_v1beta1.types import tensorboard_run -from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run -from google.cloud.aiplatform_v1beta1.types import tensorboard_service -from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series -from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series as gca_tensorboard_time_series -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import 
timestamp_pb2 # type: ignore -from .transports.base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import TensorboardServiceGrpcAsyncIOTransport -from .client import TensorboardServiceClient - - -class TensorboardServiceAsyncClient: - """TensorboardService""" - - _client: TensorboardServiceClient - - DEFAULT_ENDPOINT = TensorboardServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = TensorboardServiceClient.DEFAULT_MTLS_ENDPOINT - - tensorboard_path = staticmethod(TensorboardServiceClient.tensorboard_path) - parse_tensorboard_path = staticmethod(TensorboardServiceClient.parse_tensorboard_path) - tensorboard_experiment_path = staticmethod(TensorboardServiceClient.tensorboard_experiment_path) - parse_tensorboard_experiment_path = staticmethod(TensorboardServiceClient.parse_tensorboard_experiment_path) - tensorboard_run_path = staticmethod(TensorboardServiceClient.tensorboard_run_path) - parse_tensorboard_run_path = staticmethod(TensorboardServiceClient.parse_tensorboard_run_path) - tensorboard_time_series_path = staticmethod(TensorboardServiceClient.tensorboard_time_series_path) - parse_tensorboard_time_series_path = staticmethod(TensorboardServiceClient.parse_tensorboard_time_series_path) - common_billing_account_path = staticmethod(TensorboardServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(TensorboardServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(TensorboardServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(TensorboardServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(TensorboardServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(TensorboardServiceClient.parse_common_organization_path) - common_project_path = staticmethod(TensorboardServiceClient.common_project_path) - parse_common_project_path = 
staticmethod(TensorboardServiceClient.parse_common_project_path) - common_location_path = staticmethod(TensorboardServiceClient.common_location_path) - parse_common_location_path = staticmethod(TensorboardServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - TensorboardServiceAsyncClient: The constructed client. - """ - return TensorboardServiceClient.from_service_account_info.__func__(TensorboardServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - TensorboardServiceAsyncClient: The constructed client. - """ - return TensorboardServiceClient.from_service_account_file.__func__(TensorboardServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> TensorboardServiceTransport: - """Returns the transport used by the client instance. - - Returns: - TensorboardServiceTransport: The transport used by the client instance. 
- """ - return self._client.transport - - get_transport_class = functools.partial(type(TensorboardServiceClient).get_transport_class, type(TensorboardServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, TensorboardServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the tensorboard service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.TensorboardServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. 
- - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = TensorboardServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_tensorboard(self, - request: Union[tensorboard_service.CreateTensorboardRequest, dict] = None, - *, - parent: str = None, - tensorboard: gca_tensorboard.Tensorboard = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a Tensorboard. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRequest, dict]): - The request object. Request message for - [TensorboardService.CreateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboard]. - parent (:class:`str`): - Required. The resource name of the Location to create - the Tensorboard in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard (:class:`google.cloud.aiplatform_v1beta1.types.Tensorboard`): - Required. The Tensorboard to create. - This corresponds to the ``tensorboard`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. 
- A default Tensorboard is provided in each region of a - GCP project. If needed users can also create extra - Tensorboards in their projects. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, tensorboard]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.CreateTensorboardRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if tensorboard is not None: - request.tensorboard = tensorboard - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_tensorboard, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_tensorboard.Tensorboard, - metadata_type=tensorboard_service.CreateTensorboardOperationMetadata, - ) - - # Done; return the response. 
- return response - - async def get_tensorboard(self, - request: Union[tensorboard_service.GetTensorboardRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard.Tensorboard: - r"""Gets a Tensorboard. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetTensorboardRequest, dict]): - The request object. Request message for - [TensorboardService.GetTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboard]. - name (:class:`str`): - Required. The name of the Tensorboard resource. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Tensorboard: - Tensorboard is a physical database - that stores users' training metrics. A - default Tensorboard is provided in each - region of a GCP project. If needed users - can also create extra Tensorboards in - their projects. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.GetTensorboardRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_tensorboard, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_tensorboard(self, - request: Union[tensorboard_service.UpdateTensorboardRequest, dict] = None, - *, - tensorboard: gca_tensorboard.Tensorboard = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Updates a Tensorboard. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRequest, dict]): - The request object. Request message for - [TensorboardService.UpdateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboard]. - tensorboard (:class:`google.cloud.aiplatform_v1beta1.types.Tensorboard`): - Required. The Tensorboard's ``name`` field is used to - identify the Tensorboard to be updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - - This corresponds to the ``tensorboard`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. Field mask is used to specify the fields to be - overwritten in the Tensorboard resource by the update. - The fields specified in the update_mask are relative to - the resource, not the full request. 
A field will be - overwritten if it is in the mask. If the user does not - provide a mask then all fields will be overwritten if - new values are specified. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. - A default Tensorboard is provided in each region of a - GCP project. If needed users can also create extra - Tensorboards in their projects. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.UpdateTensorboardRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard is not None: - request.tensorboard = tensorboard - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_tensorboard, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard.name", request.tensorboard.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gca_tensorboard.Tensorboard, - metadata_type=tensorboard_service.UpdateTensorboardOperationMetadata, - ) - - # Done; return the response. - return response - - async def list_tensorboards(self, - request: Union[tensorboard_service.ListTensorboardsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTensorboardsAsyncPager: - r"""Lists Tensorboards in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest, dict]): - The request object. Request message for - [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. - parent (:class:`str`): - Required. The resource name of the Location to list - Tensorboards. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardsAsyncPager: - Response message for - [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.ListTensorboardsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_tensorboards, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListTensorboardsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def delete_tensorboard(self, - request: Union[tensorboard_service.DeleteTensorboardRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a Tensorboard. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRequest, dict]): - The request object. Request message for - [TensorboardService.DeleteTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboard]. - name (:class:`str`): - Required. The name of the Tensorboard to be deleted. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.DeleteTensorboardRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_tensorboard, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def create_tensorboard_experiment(self, - request: Union[tensorboard_service.CreateTensorboardExperimentRequest, dict] = None, - *, - parent: str = None, - tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, - tensorboard_experiment_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_experiment.TensorboardExperiment: - r"""Creates a TensorboardExperiment. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateTensorboardExperimentRequest, dict]): - The request object. 
Request message for - [TensorboardService.CreateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardExperiment]. - parent (:class:`str`): - Required. The resource name of the Tensorboard to create - the TensorboardExperiment in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_experiment (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardExperiment`): - The TensorboardExperiment to create. - This corresponds to the ``tensorboard_experiment`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_experiment_id (:class:`str`): - Required. The ID to use for the Tensorboard experiment, - which will become the final component of the Tensorboard - experiment's resource name. - - This value should be 1-128 characters, and valid - characters are /[a-z][0-9]-/. - - This corresponds to the ``tensorboard_experiment_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: - A TensorboardExperiment is a group of - TensorboardRuns, that are typically the - results of a training job run, in a - Tensorboard. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, tensorboard_experiment, tensorboard_experiment_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.CreateTensorboardExperimentRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if tensorboard_experiment is not None: - request.tensorboard_experiment = tensorboard_experiment - if tensorboard_experiment_id is not None: - request.tensorboard_experiment_id = tensorboard_experiment_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_tensorboard_experiment, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_tensorboard_experiment(self, - request: Union[tensorboard_service.GetTensorboardExperimentRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_experiment.TensorboardExperiment: - r"""Gets a TensorboardExperiment. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetTensorboardExperimentRequest, dict]): - The request object. Request message for - [TensorboardService.GetTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardExperiment]. 
- name (:class:`str`): - Required. The name of the TensorboardExperiment - resource. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: - A TensorboardExperiment is a group of - TensorboardRuns, that are typically the - results of a training job run, in a - Tensorboard. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.GetTensorboardExperimentRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_tensorboard_experiment, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def update_tensorboard_experiment(self, - request: Union[tensorboard_service.UpdateTensorboardExperimentRequest, dict] = None, - *, - tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_experiment.TensorboardExperiment: - r"""Updates a TensorboardExperiment. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateTensorboardExperimentRequest, dict]): - The request object. Request message for - [TensorboardService.UpdateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardExperiment]. - tensorboard_experiment (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardExperiment`): - Required. The TensorboardExperiment's ``name`` field is - used to identify the TensorboardExperiment to be - updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - - This corresponds to the ``tensorboard_experiment`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. Field mask is used to specify the fields to be - overwritten in the TensorboardExperiment resource by the - update. The fields specified in the update_mask are - relative to the resource, not the full request. A field - will be overwritten if it is in the mask. If the user - does not provide a mask then all fields will be - overwritten if new values are specified. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: - A TensorboardExperiment is a group of - TensorboardRuns, that are typically the - results of a training job run, in a - Tensorboard. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_experiment, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.UpdateTensorboardExperimentRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_experiment is not None: - request.tensorboard_experiment = tensorboard_experiment - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_tensorboard_experiment, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_experiment.name", request.tensorboard_experiment.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def list_tensorboard_experiments(self, - request: Union[tensorboard_service.ListTensorboardExperimentsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTensorboardExperimentsAsyncPager: - r"""Lists TensorboardExperiments in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest, dict]): - The request object. Request message for - [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. - parent (:class:`str`): - Required. The resource name of the - Tensorboard to list - TensorboardExperiments. Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}' - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardExperimentsAsyncPager: - Response message for - [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.ListTensorboardExperimentsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_tensorboard_experiments, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListTensorboardExperimentsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_tensorboard_experiment(self, - request: Union[tensorboard_service.DeleteTensorboardExperimentRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a TensorboardExperiment. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTensorboardExperimentRequest, dict]): - The request object. Request message for - [TensorboardService.DeleteTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardExperiment]. - name (:class:`str`): - Required. 
The name of the TensorboardExperiment to be - deleted. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.DeleteTensorboardExperimentRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_tensorboard_experiment, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def create_tensorboard_run(self, - request: Union[tensorboard_service.CreateTensorboardRunRequest, dict] = None, - *, - parent: str = None, - tensorboard_run: gca_tensorboard_run.TensorboardRun = None, - tensorboard_run_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_run.TensorboardRun: - r"""Creates a TensorboardRun. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest, dict]): - The request object. Request message for - [TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun]. - parent (:class:`str`): - Required. The resource name of the TensorboardExperiment - to create the TensorboardRun in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_run (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardRun`): - Required. The TensorboardRun to - create. 
- - This corresponds to the ``tensorboard_run`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_run_id (:class:`str`): - Required. The ID to use for the Tensorboard run, which - will become the final component of the Tensorboard run's - resource name. - - This value should be 1-128 characters, and valid - characters are /[a-z][0-9]-/. - - This corresponds to the ``tensorboard_run_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardRun: - TensorboardRun maps to a specific - execution of a training job with a given - set of hyperparameter values, model - definition, dataset, etc - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, tensorboard_run, tensorboard_run_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.CreateTensorboardRunRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if tensorboard_run is not None: - request.tensorboard_run = tensorboard_run - if tensorboard_run_id is not None: - request.tensorboard_run_id = tensorboard_run_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_tensorboard_run, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def batch_create_tensorboard_runs(self, - request: Union[tensorboard_service.BatchCreateTensorboardRunsRequest, dict] = None, - *, - parent: str = None, - requests: Sequence[tensorboard_service.CreateTensorboardRunRequest] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_service.BatchCreateTensorboardRunsResponse: - r"""Batch create TensorboardRuns. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsRequest, dict]): - The request object. Request message for - [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns]. - parent (:class:`str`): - Required. The resource name of the TensorboardExperiment - to create the TensorboardRuns in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - The parent field in the CreateTensorboardRunRequest - messages must match this field. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - requests (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest]`): - Required. The request message - specifying the TensorboardRuns to - create. A maximum of 1000 - TensorboardRuns can be created in a - batch. 
- - This corresponds to the ``requests`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsResponse: - Response message for - [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, requests]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.BatchCreateTensorboardRunsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if requests: - request.requests.extend(requests) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.batch_create_tensorboard_runs, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def get_tensorboard_run(self, - request: Union[tensorboard_service.GetTensorboardRunRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_run.TensorboardRun: - r"""Gets a TensorboardRun. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetTensorboardRunRequest, dict]): - The request object. Request message for - [TensorboardService.GetTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardRun]. - name (:class:`str`): - Required. The name of the TensorboardRun resource. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardRun: - TensorboardRun maps to a specific - execution of a training job with a given - set of hyperparameter values, model - definition, dataset, etc - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.GetTensorboardRunRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_tensorboard_run, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_tensorboard_run(self, - request: Union[tensorboard_service.UpdateTensorboardRunRequest, dict] = None, - *, - tensorboard_run: gca_tensorboard_run.TensorboardRun = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_run.TensorboardRun: - r"""Updates a TensorboardRun. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRunRequest, dict]): - The request object. Request message for - [TensorboardService.UpdateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardRun]. - tensorboard_run (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardRun`): - Required. The TensorboardRun's ``name`` field is used to - identify the TensorboardRun to be updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``tensorboard_run`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. Field mask is used to specify the fields to be - overwritten in the TensorboardRun resource by the - update. 
The fields specified in the update_mask are - relative to the resource, not the full request. A field - will be overwritten if it is in the mask. If the user - does not provide a mask then all fields will be - overwritten if new values are specified. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardRun: - TensorboardRun maps to a specific - execution of a training job with a given - set of hyperparameter values, model - definition, dataset, etc - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_run, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.UpdateTensorboardRunRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_run is not None: - request.tensorboard_run = tensorboard_run - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_tensorboard_run, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_run.name", request.tensorboard_run.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_tensorboard_runs(self, - request: Union[tensorboard_service.ListTensorboardRunsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTensorboardRunsAsyncPager: - r"""Lists TensorboardRuns in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest, dict]): - The request object. Request message for - [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]. - parent (:class:`str`): - Required. The resource name of the - TensorboardExperiment to list - TensorboardRuns. Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}' - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardRunsAsyncPager: - Response message for - [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.ListTensorboardRunsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_tensorboard_runs, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListTensorboardRunsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_tensorboard_run(self, - request: Union[tensorboard_service.DeleteTensorboardRunRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a TensorboardRun. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRunRequest, dict]): - The request object. 
Request message for - [TensorboardService.DeleteTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardRun]. - name (:class:`str`): - Required. The name of the TensorboardRun to be deleted. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.DeleteTensorboardRunRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_tensorboard_run, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def batch_create_tensorboard_time_series(self, - request: Union[tensorboard_service.BatchCreateTensorboardTimeSeriesRequest, dict] = None, - *, - parent: str = None, - requests: Sequence[tensorboard_service.CreateTensorboardTimeSeriesRequest] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_service.BatchCreateTensorboardTimeSeriesResponse: - r"""Batch create TensorboardTimeSeries that belong to a - TensorboardExperiment. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesRequest, dict]): - The request object. Request message for - [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries]. - parent (:class:`str`): - Required. The resource name of the TensorboardExperiment - to create the TensorboardTimeSeries in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - The TensorboardRuns referenced by the parent fields in - the CreateTensorboardTimeSeriesRequest messages must be - sub resources of this TensorboardExperiment. 
- - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - requests (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest]`): - Required. The request message - specifying the TensorboardTimeSeries to - create. A maximum of 1000 - TensorboardTimeSeries can be created in - a batch. - - This corresponds to the ``requests`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesResponse: - Response message for - [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, requests]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.BatchCreateTensorboardTimeSeriesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if requests: - request.requests.extend(requests) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.batch_create_tensorboard_time_series, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def create_tensorboard_time_series(self, - request: Union[tensorboard_service.CreateTensorboardTimeSeriesRequest, dict] = None, - *, - parent: str = None, - tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_time_series.TensorboardTimeSeries: - r"""Creates a TensorboardTimeSeries. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest, dict]): - The request object. Request message for - [TensorboardService.CreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardTimeSeries]. - parent (:class:`str`): - Required. The resource name of the TensorboardRun to - create the TensorboardTimeSeries in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_time_series (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries`): - Required. The TensorboardTimeSeries - to create. - - This corresponds to the ``tensorboard_time_series`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries: - TensorboardTimeSeries maps to times - series produced in training runs - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, tensorboard_time_series]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.CreateTensorboardTimeSeriesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if tensorboard_time_series is not None: - request.tensorboard_time_series = tensorboard_time_series - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_tensorboard_time_series, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def get_tensorboard_time_series(self, - request: Union[tensorboard_service.GetTensorboardTimeSeriesRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_time_series.TensorboardTimeSeries: - r"""Gets a TensorboardTimeSeries. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetTensorboardTimeSeriesRequest, dict]): - The request object. Request message for - [TensorboardService.GetTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardTimeSeries]. - name (:class:`str`): - Required. The name of the TensorboardTimeSeries - resource. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries: - TensorboardTimeSeries maps to times - series produced in training runs - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.GetTensorboardTimeSeriesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_tensorboard_time_series, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_tensorboard_time_series(self, - request: Union[tensorboard_service.UpdateTensorboardTimeSeriesRequest, dict] = None, - *, - tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_time_series.TensorboardTimeSeries: - r"""Updates a TensorboardTimeSeries. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateTensorboardTimeSeriesRequest, dict]): - The request object. Request message for - [TensorboardService.UpdateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardTimeSeries]. - tensorboard_time_series (:class:`google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries`): - Required. The TensorboardTimeSeries' ``name`` field is - used to identify the TensorboardTimeSeries to be - updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``tensorboard_time_series`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. Field mask is used to specify the fields to be - overwritten in the TensorboardTimeSeries resource by the - update. The fields specified in the update_mask are - relative to the resource, not the full request. A field - will be overwritten if it is in the mask. If the user - does not provide a mask then all fields will be - overwritten if new values are specified. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries: - TensorboardTimeSeries maps to times - series produced in training runs - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_time_series, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.UpdateTensorboardTimeSeriesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_time_series is not None: - request.tensorboard_time_series = tensorboard_time_series - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_tensorboard_time_series, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_time_series.name", request.tensorboard_time_series.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_tensorboard_time_series(self, - request: Union[tensorboard_service.ListTensorboardTimeSeriesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTensorboardTimeSeriesAsyncPager: - r"""Lists TensorboardTimeSeries in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest, dict]): - The request object. Request message for - [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]. - parent (:class:`str`): - Required. The resource name of the - TensorboardRun to list - TensorboardTimeSeries. Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}' - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardTimeSeriesAsyncPager: - Response message for - [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.ListTensorboardTimeSeriesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_tensorboard_time_series, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListTensorboardTimeSeriesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def delete_tensorboard_time_series(self, - request: Union[tensorboard_service.DeleteTensorboardTimeSeriesRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Deletes a TensorboardTimeSeries. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTensorboardTimeSeriesRequest, dict]): - The request object. Request message for - [TensorboardService.DeleteTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardTimeSeries]. - name (:class:`str`): - Required. The name of the TensorboardTimeSeries to be - deleted. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.DeleteTensorboardTimeSeriesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_tensorboard_time_series, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - async def batch_read_tensorboard_time_series_data(self, - request: Union[tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest, dict] = None, - *, - tensorboard: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse: - r"""Reads multiple TensorboardTimeSeries' data. The data - point number limit is 1000 for scalars, 100 for tensors - and blob references. 
If the number of data points stored - is less than the limit, all data will be returned. - Otherwise, that limit number of data points will be - randomly selected from this time series and returned. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataRequest, dict]): - The request object. Request message for - [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.BatchReadTensorboardTimeSeriesData]. - tensorboard (:class:`str`): - Required. The resource name of the Tensorboard - containing TensorboardTimeSeries to read data from. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}``. - The TensorboardTimeSeries referenced by - [time_series][google.cloud.aiplatform.v1beta1.BatchReadTensorboardTimeSeriesDataRequest.time_series] - must be sub resources of this Tensorboard. - - This corresponds to the ``tensorboard`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataResponse: - Response message for - [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.BatchReadTensorboardTimeSeriesData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([tensorboard]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard is not None: - request.tensorboard = tensorboard - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.batch_read_tensorboard_time_series_data, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard", request.tensorboard), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def read_tensorboard_time_series_data(self, - request: Union[tensorboard_service.ReadTensorboardTimeSeriesDataRequest, dict] = None, - *, - tensorboard_time_series: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_service.ReadTensorboardTimeSeriesDataResponse: - r"""Reads a TensorboardTimeSeries' data. By default, if the number - of data points stored is less than 1000, all data will be - returned. Otherwise, 1000 data points will be randomly selected - from this time series and returned. This value can be changed by - changing max_data_points, which can't be greater than 10k. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataRequest, dict]): - The request object. 
Request message for - [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. - tensorboard_time_series (:class:`str`): - Required. The resource name of the TensorboardTimeSeries - to read data from. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``tensorboard_time_series`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataResponse: - Response message for - [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_time_series]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_time_series is not None: - request.tensorboard_time_series = tensorboard_time_series - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.read_tensorboard_time_series_data, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_time_series", request.tensorboard_time_series), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def read_tensorboard_blob_data(self, - request: Union[tensorboard_service.ReadTensorboardBlobDataRequest, dict] = None, - *, - time_series: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> Awaitable[AsyncIterable[tensorboard_service.ReadTensorboardBlobDataResponse]]: - r"""Gets bytes of TensorboardBlobs. - This is to allow reading blob data stored in consumer - project's Cloud Storage bucket without users having to - obtain Cloud Storage access permission. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataRequest, dict]): - The request object. Request message for - [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData]. - time_series (:class:`str`): - Required. The resource name of the TensorboardTimeSeries - to list Blobs. Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}' - - This corresponds to the ``time_series`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - AsyncIterable[google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataResponse]: - Response message for - [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([time_series]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.ReadTensorboardBlobDataRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if time_series is not None: - request.time_series = time_series - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.read_tensorboard_blob_data, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("time_series", request.time_series), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def write_tensorboard_experiment_data(self, - request: Union[tensorboard_service.WriteTensorboardExperimentDataRequest, dict] = None, - *, - tensorboard_experiment: str = None, - write_run_data_requests: Sequence[tensorboard_service.WriteTensorboardRunDataRequest] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_service.WriteTensorboardExperimentDataResponse: - r"""Write time series data points of multiple - TensorboardTimeSeries in multiple TensorboardRun's. If - any data fail to be ingested, an error will be returned. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataRequest, dict]): - The request object. Request message for - [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData]. - tensorboard_experiment (:class:`str`): - Required. The resource name of the TensorboardExperiment - to write data to. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - - This corresponds to the ``tensorboard_experiment`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - write_run_data_requests (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest]`): - Required. Requests containing per-run - TensorboardTimeSeries data to write. - - This corresponds to the ``write_run_data_requests`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataResponse: - Response message for - [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_experiment, write_run_data_requests]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.WriteTensorboardExperimentDataRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_experiment is not None: - request.tensorboard_experiment = tensorboard_experiment - if write_run_data_requests: - request.write_run_data_requests.extend(write_run_data_requests) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.write_tensorboard_experiment_data, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_experiment", request.tensorboard_experiment), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def write_tensorboard_run_data(self, - request: Union[tensorboard_service.WriteTensorboardRunDataRequest, dict] = None, - *, - tensorboard_run: str = None, - time_series_data: Sequence[tensorboard_data.TimeSeriesData] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_service.WriteTensorboardRunDataResponse: - r"""Write time series data points into multiple - TensorboardTimeSeries under a TensorboardRun. If any - data fail to be ingested, an error will be returned. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest, dict]): - The request object. Request message for - [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData]. - tensorboard_run (:class:`str`): - Required. The resource name of the TensorboardRun to - write data to. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``tensorboard_run`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - time_series_data (:class:`Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesData]`): - Required. The TensorboardTimeSeries - data to write. Values with in a time - series are indexed by their step value. - Repeated writes to the same step will - overwrite the existing value for that - step. - The upper limit of data points per write - request is 5000. - - This corresponds to the ``time_series_data`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataResponse: - Response message for - [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_run, time_series_data]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.WriteTensorboardRunDataRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_run is not None: - request.tensorboard_run = tensorboard_run - if time_series_data: - request.time_series_data.extend(time_series_data) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.write_tensorboard_run_data, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_run", request.tensorboard_run), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def export_tensorboard_time_series_data(self, - request: Union[tensorboard_service.ExportTensorboardTimeSeriesDataRequest, dict] = None, - *, - tensorboard_time_series: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ExportTensorboardTimeSeriesDataAsyncPager: - r"""Exports a TensorboardTimeSeries' data. Data is - returned in paginated responses. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest, dict]): - The request object. Request message for - [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. - tensorboard_time_series (:class:`str`): - Required. The resource name of the TensorboardTimeSeries - to export data from. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``tensorboard_time_series`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ExportTensorboardTimeSeriesDataAsyncPager: - Response message for - [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([tensorboard_time_series]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_time_series is not None: - request.tensorboard_time_series = tensorboard_time_series - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.export_tensorboard_time_series_data, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_time_series", request.tensorboard_time_series), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ExportTensorboardTimeSeriesDataAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "TensorboardServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py deleted file mode 100644 index 2a72a22981..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/client.py +++ /dev/null @@ -1,2936 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Iterable, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation as gac_operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.tensorboard_service import pagers -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.cloud.aiplatform_v1beta1.types import tensorboard -from google.cloud.aiplatform_v1beta1.types import tensorboard as gca_tensorboard -from google.cloud.aiplatform_v1beta1.types import tensorboard_data -from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment -from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment -from google.cloud.aiplatform_v1beta1.types import tensorboard_run -from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run -from google.cloud.aiplatform_v1beta1.types import tensorboard_service -from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series -from google.cloud.aiplatform_v1beta1.types import 
tensorboard_time_series as gca_tensorboard_time_series -from google.protobuf import empty_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import TensorboardServiceGrpcTransport -from .transports.grpc_asyncio import TensorboardServiceGrpcAsyncIOTransport - - -class TensorboardServiceClientMeta(type): - """Metaclass for the TensorboardService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[TensorboardServiceTransport]] - _transport_registry["grpc"] = TensorboardServiceGrpcTransport - _transport_registry["grpc_asyncio"] = TensorboardServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[TensorboardServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class TensorboardServiceClient(metaclass=TensorboardServiceClientMeta): - """TensorboardService""" - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. 
- """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - TensorboardServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - TensorboardServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> TensorboardServiceTransport: - """Returns the transport used by the client instance. 
- - Returns: - TensorboardServiceTransport: The transport used by the client - instance. - """ - return self._transport - - @staticmethod - def tensorboard_path(project: str,location: str,tensorboard: str,) -> str: - """Returns a fully-qualified tensorboard string.""" - return "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(project=project, location=location, tensorboard=tensorboard, ) - - @staticmethod - def parse_tensorboard_path(path: str) -> Dict[str,str]: - """Parses a tensorboard path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def tensorboard_experiment_path(project: str,location: str,tensorboard: str,experiment: str,) -> str: - """Returns a fully-qualified tensorboard_experiment string.""" - return "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, ) - - @staticmethod - def parse_tensorboard_experiment_path(path: str) -> Dict[str,str]: - """Parses a tensorboard_experiment path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)/experiments/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def tensorboard_run_path(project: str,location: str,tensorboard: str,experiment: str,run: str,) -> str: - """Returns a fully-qualified tensorboard_run string.""" - return "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, run=run, ) - - @staticmethod - def parse_tensorboard_run_path(path: str) -> Dict[str,str]: - """Parses a tensorboard_run path into its component segments.""" - m = 
re.match(r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)/experiments/(?P.+?)/runs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def tensorboard_time_series_path(project: str,location: str,tensorboard: str,experiment: str,run: str,time_series: str,) -> str: - """Returns a fully-qualified tensorboard_time_series string.""" - return "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, run=run, time_series=time_series, ) - - @staticmethod - def parse_tensorboard_time_series_path(path: str) -> Dict[str,str]: - """Parses a tensorboard_time_series path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/tensorboards/(?P.+?)/experiments/(?P.+?)/runs/(?P.+?)/timeSeries/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return 
"organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, TensorboardServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the tensorboard service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, TensorboardServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. 
- client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. 
- if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, TensorboardServiceTransport): - # transport is a TensorboardServiceTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def create_tensorboard(self, - request: Union[tensorboard_service.CreateTensorboardRequest, dict] = None, - *, - parent: str = None, - tensorboard: gca_tensorboard.Tensorboard = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Creates a Tensorboard. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRequest, dict]): - The request object. Request message for - [TensorboardService.CreateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboard]. - parent (str): - Required. The resource name of the Location to create - the Tensorboard in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard (google.cloud.aiplatform_v1beta1.types.Tensorboard): - Required. The Tensorboard to create. - This corresponds to the ``tensorboard`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
- - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. - A default Tensorboard is provided in each region of a - GCP project. If needed users can also create extra - Tensorboards in their projects. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, tensorboard]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.CreateTensorboardRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.CreateTensorboardRequest): - request = tensorboard_service.CreateTensorboardRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if tensorboard is not None: - request.tensorboard = tensorboard - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_tensorboard] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
- response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_tensorboard.Tensorboard, - metadata_type=tensorboard_service.CreateTensorboardOperationMetadata, - ) - - # Done; return the response. - return response - - def get_tensorboard(self, - request: Union[tensorboard_service.GetTensorboardRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard.Tensorboard: - r"""Gets a Tensorboard. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetTensorboardRequest, dict]): - The request object. Request message for - [TensorboardService.GetTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboard]. - name (str): - Required. The name of the Tensorboard resource. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Tensorboard: - Tensorboard is a physical database - that stores users' training metrics. A - default Tensorboard is provided in each - region of a GCP project. If needed users - can also create extra Tensorboards in - their projects. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.GetTensorboardRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.GetTensorboardRequest): - request = tensorboard_service.GetTensorboardRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_tensorboard] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_tensorboard(self, - request: Union[tensorboard_service.UpdateTensorboardRequest, dict] = None, - *, - tensorboard: gca_tensorboard.Tensorboard = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Updates a Tensorboard. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRequest, dict]): - The request object. Request message for - [TensorboardService.UpdateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboard]. - tensorboard (google.cloud.aiplatform_v1beta1.types.Tensorboard): - Required. 
The Tensorboard's ``name`` field is used to - identify the Tensorboard to be updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - - This corresponds to the ``tensorboard`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. Field mask is used to specify the fields to be - overwritten in the Tensorboard resource by the update. - The fields specified in the update_mask are relative to - the resource, not the full request. A field will be - overwritten if it is in the mask. If the user does not - provide a mask then all fields will be overwritten if - new values are specified. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Tensorboard` Tensorboard is a physical database that stores users' training metrics. - A default Tensorboard is provided in each region of a - GCP project. If needed users can also create extra - Tensorboards in their projects. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([tensorboard, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.UpdateTensorboardRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.UpdateTensorboardRequest): - request = tensorboard_service.UpdateTensorboardRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard is not None: - request.tensorboard = tensorboard - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_tensorboard] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard.name", request.tensorboard.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - gca_tensorboard.Tensorboard, - metadata_type=tensorboard_service.UpdateTensorboardOperationMetadata, - ) - - # Done; return the response. - return response - - def list_tensorboards(self, - request: Union[tensorboard_service.ListTensorboardsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTensorboardsPager: - r"""Lists Tensorboards in a Location. 
- - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest, dict]): - The request object. Request message for - [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. - parent (str): - Required. The resource name of the Location to list - Tensorboards. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardsPager: - Response message for - [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.ListTensorboardsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.ListTensorboardsRequest): - request = tensorboard_service.ListTensorboardsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_tensorboards] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListTensorboardsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_tensorboard(self, - request: Union[tensorboard_service.DeleteTensorboardRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a Tensorboard. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRequest, dict]): - The request object. Request message for - [TensorboardService.DeleteTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboard]. - name (str): - Required. The name of the Tensorboard to be deleted. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.DeleteTensorboardRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.DeleteTensorboardRequest): - request = tensorboard_service.DeleteTensorboardRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
- response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def create_tensorboard_experiment(self, - request: Union[tensorboard_service.CreateTensorboardExperimentRequest, dict] = None, - *, - parent: str = None, - tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, - tensorboard_experiment_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_experiment.TensorboardExperiment: - r"""Creates a TensorboardExperiment. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateTensorboardExperimentRequest, dict]): - The request object. Request message for - [TensorboardService.CreateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardExperiment]. - parent (str): - Required. The resource name of the Tensorboard to create - the TensorboardExperiment in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_experiment (google.cloud.aiplatform_v1beta1.types.TensorboardExperiment): - The TensorboardExperiment to create. - This corresponds to the ``tensorboard_experiment`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_experiment_id (str): - Required. The ID to use for the Tensorboard experiment, - which will become the final component of the Tensorboard - experiment's resource name. - - This value should be 1-128 characters, and valid - characters are /[a-z][0-9]-/. 
- - This corresponds to the ``tensorboard_experiment_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: - A TensorboardExperiment is a group of - TensorboardRuns, that are typically the - results of a training job run, in a - Tensorboard. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, tensorboard_experiment, tensorboard_experiment_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.CreateTensorboardExperimentRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.CreateTensorboardExperimentRequest): - request = tensorboard_service.CreateTensorboardExperimentRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if tensorboard_experiment is not None: - request.tensorboard_experiment = tensorboard_experiment - if tensorboard_experiment_id is not None: - request.tensorboard_experiment_id = tensorboard_experiment_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.create_tensorboard_experiment] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_tensorboard_experiment(self, - request: Union[tensorboard_service.GetTensorboardExperimentRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_experiment.TensorboardExperiment: - r"""Gets a TensorboardExperiment. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetTensorboardExperimentRequest, dict]): - The request object. Request message for - [TensorboardService.GetTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardExperiment]. - name (str): - Required. The name of the TensorboardExperiment - resource. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: - A TensorboardExperiment is a group of - TensorboardRuns, that are typically the - results of a training job run, in a - Tensorboard. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.GetTensorboardExperimentRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.GetTensorboardExperimentRequest): - request = tensorboard_service.GetTensorboardExperimentRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_tensorboard_experiment] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_tensorboard_experiment(self, - request: Union[tensorboard_service.UpdateTensorboardExperimentRequest, dict] = None, - *, - tensorboard_experiment: gca_tensorboard_experiment.TensorboardExperiment = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_experiment.TensorboardExperiment: - r"""Updates a TensorboardExperiment. 
- - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateTensorboardExperimentRequest, dict]): - The request object. Request message for - [TensorboardService.UpdateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardExperiment]. - tensorboard_experiment (google.cloud.aiplatform_v1beta1.types.TensorboardExperiment): - Required. The TensorboardExperiment's ``name`` field is - used to identify the TensorboardExperiment to be - updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - - This corresponds to the ``tensorboard_experiment`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. Field mask is used to specify the fields to be - overwritten in the TensorboardExperiment resource by the - update. The fields specified in the update_mask are - relative to the resource, not the full request. A field - will be overwritten if it is in the mask. If the user - does not provide a mask then all fields will be - overwritten if new values are specified. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardExperiment: - A TensorboardExperiment is a group of - TensorboardRuns, that are typically the - results of a training job run, in a - Tensorboard. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([tensorboard_experiment, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.UpdateTensorboardExperimentRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.UpdateTensorboardExperimentRequest): - request = tensorboard_service.UpdateTensorboardExperimentRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_experiment is not None: - request.tensorboard_experiment = tensorboard_experiment - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_tensorboard_experiment] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_experiment.name", request.tensorboard_experiment.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_tensorboard_experiments(self, - request: Union[tensorboard_service.ListTensorboardExperimentsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTensorboardExperimentsPager: - r"""Lists TensorboardExperiments in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest, dict]): - The request object. 
Request message for - [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. - parent (str): - Required. The resource name of the - Tensorboard to list - TensorboardExperiments. Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}' - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardExperimentsPager: - Response message for - [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.ListTensorboardExperimentsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.ListTensorboardExperimentsRequest): - request = tensorboard_service.ListTensorboardExperimentsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_tensorboard_experiments] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListTensorboardExperimentsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_tensorboard_experiment(self, - request: Union[tensorboard_service.DeleteTensorboardExperimentRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a TensorboardExperiment. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTensorboardExperimentRequest, dict]): - The request object. Request message for - [TensorboardService.DeleteTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardExperiment]. - name (str): - Required. The name of the TensorboardExperiment to be - deleted. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.DeleteTensorboardExperimentRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.DeleteTensorboardExperimentRequest): - request = tensorboard_service.DeleteTensorboardExperimentRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard_experiment] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. - return response - - def create_tensorboard_run(self, - request: Union[tensorboard_service.CreateTensorboardRunRequest, dict] = None, - *, - parent: str = None, - tensorboard_run: gca_tensorboard_run.TensorboardRun = None, - tensorboard_run_id: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_run.TensorboardRun: - r"""Creates a TensorboardRun. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest, dict]): - The request object. Request message for - [TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun]. - parent (str): - Required. The resource name of the TensorboardExperiment - to create the TensorboardRun in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_run (google.cloud.aiplatform_v1beta1.types.TensorboardRun): - Required. The TensorboardRun to - create. - - This corresponds to the ``tensorboard_run`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_run_id (str): - Required. The ID to use for the Tensorboard run, which - will become the final component of the Tensorboard run's - resource name. - - This value should be 1-128 characters, and valid - characters are /[a-z][0-9]-/. 
- - This corresponds to the ``tensorboard_run_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardRun: - TensorboardRun maps to a specific - execution of a training job with a given - set of hyperparameter values, model - definition, dataset, etc - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, tensorboard_run, tensorboard_run_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.CreateTensorboardRunRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.CreateTensorboardRunRequest): - request = tensorboard_service.CreateTensorboardRunRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if tensorboard_run is not None: - request.tensorboard_run = tensorboard_run - if tensorboard_run_id is not None: - request.tensorboard_run_id = tensorboard_run_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_tensorboard_run] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def batch_create_tensorboard_runs(self, - request: Union[tensorboard_service.BatchCreateTensorboardRunsRequest, dict] = None, - *, - parent: str = None, - requests: Sequence[tensorboard_service.CreateTensorboardRunRequest] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_service.BatchCreateTensorboardRunsResponse: - r"""Batch create TensorboardRuns. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsRequest, dict]): - The request object. Request message for - [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns]. - parent (str): - Required. The resource name of the TensorboardExperiment - to create the TensorboardRuns in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - The parent field in the CreateTensorboardRunRequest - messages must match this field. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - requests (Sequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest]): - Required. The request message - specifying the TensorboardRuns to - create. A maximum of 1000 - TensorboardRuns can be created in a - batch. - - This corresponds to the ``requests`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsResponse: - Response message for - [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, requests]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.BatchCreateTensorboardRunsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.BatchCreateTensorboardRunsRequest): - request = tensorboard_service.BatchCreateTensorboardRunsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if requests is not None: - request.requests = requests - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.batch_create_tensorboard_runs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def get_tensorboard_run(self, - request: Union[tensorboard_service.GetTensorboardRunRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_run.TensorboardRun: - r"""Gets a TensorboardRun. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetTensorboardRunRequest, dict]): - The request object. Request message for - [TensorboardService.GetTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardRun]. - name (str): - Required. The name of the TensorboardRun resource. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardRun: - TensorboardRun maps to a specific - execution of a training job with a given - set of hyperparameter values, model - definition, dataset, etc - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.GetTensorboardRunRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, tensorboard_service.GetTensorboardRunRequest): - request = tensorboard_service.GetTensorboardRunRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_tensorboard_run] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_tensorboard_run(self, - request: Union[tensorboard_service.UpdateTensorboardRunRequest, dict] = None, - *, - tensorboard_run: gca_tensorboard_run.TensorboardRun = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_run.TensorboardRun: - r"""Updates a TensorboardRun. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRunRequest, dict]): - The request object. Request message for - [TensorboardService.UpdateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardRun]. - tensorboard_run (google.cloud.aiplatform_v1beta1.types.TensorboardRun): - Required. The TensorboardRun's ``name`` field is used to - identify the TensorboardRun to be updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``tensorboard_run`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. Field mask is used to specify the fields to be - overwritten in the TensorboardRun resource by the - update. The fields specified in the update_mask are - relative to the resource, not the full request. A field - will be overwritten if it is in the mask. If the user - does not provide a mask then all fields will be - overwritten if new values are specified. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardRun: - TensorboardRun maps to a specific - execution of a training job with a given - set of hyperparameter values, model - definition, dataset, etc - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_run, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.UpdateTensorboardRunRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.UpdateTensorboardRunRequest): - request = tensorboard_service.UpdateTensorboardRunRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if tensorboard_run is not None: - request.tensorboard_run = tensorboard_run - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_tensorboard_run] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_run.name", request.tensorboard_run.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_tensorboard_runs(self, - request: Union[tensorboard_service.ListTensorboardRunsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTensorboardRunsPager: - r"""Lists TensorboardRuns in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest, dict]): - The request object. Request message for - [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]. - parent (str): - Required. The resource name of the - TensorboardExperiment to list - TensorboardRuns. Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}' - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardRunsPager: - Response message for - [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.ListTensorboardRunsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.ListTensorboardRunsRequest): - request = tensorboard_service.ListTensorboardRunsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_tensorboard_runs] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. 
- response = pagers.ListTensorboardRunsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_tensorboard_run(self, - request: Union[tensorboard_service.DeleteTensorboardRunRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a TensorboardRun. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRunRequest, dict]): - The request object. Request message for - [TensorboardService.DeleteTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardRun]. - name (str): - Required. The name of the TensorboardRun to be deleted. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.DeleteTensorboardRunRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.DeleteTensorboardRunRequest): - request = tensorboard_service.DeleteTensorboardRunRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard_run] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
- return response - - def batch_create_tensorboard_time_series(self, - request: Union[tensorboard_service.BatchCreateTensorboardTimeSeriesRequest, dict] = None, - *, - parent: str = None, - requests: Sequence[tensorboard_service.CreateTensorboardTimeSeriesRequest] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_service.BatchCreateTensorboardTimeSeriesResponse: - r"""Batch create TensorboardTimeSeries that belong to a - TensorboardExperiment. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesRequest, dict]): - The request object. Request message for - [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries]. - parent (str): - Required. The resource name of the TensorboardExperiment - to create the TensorboardTimeSeries in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - The TensorboardRuns referenced by the parent fields in - the CreateTensorboardTimeSeriesRequest messages must be - sub resources of this TensorboardExperiment. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - requests (Sequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest]): - Required. The request message - specifying the TensorboardTimeSeries to - create. A maximum of 1000 - TensorboardTimeSeries can be created in - a batch. - - This corresponds to the ``requests`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesResponse: - Response message for - [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, requests]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.BatchCreateTensorboardTimeSeriesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.BatchCreateTensorboardTimeSeriesRequest): - request = tensorboard_service.BatchCreateTensorboardTimeSeriesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if requests is not None: - request.requests = requests - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.batch_create_tensorboard_time_series] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def create_tensorboard_time_series(self, - request: Union[tensorboard_service.CreateTensorboardTimeSeriesRequest, dict] = None, - *, - parent: str = None, - tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_time_series.TensorboardTimeSeries: - r"""Creates a TensorboardTimeSeries. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest, dict]): - The request object. Request message for - [TensorboardService.CreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardTimeSeries]. - parent (str): - Required. The resource name of the TensorboardRun to - create the TensorboardTimeSeries in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - tensorboard_time_series (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries): - Required. The TensorboardTimeSeries - to create. - - This corresponds to the ``tensorboard_time_series`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries: - TensorboardTimeSeries maps to times - series produced in training runs - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, tensorboard_time_series]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.CreateTensorboardTimeSeriesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.CreateTensorboardTimeSeriesRequest): - request = tensorboard_service.CreateTensorboardTimeSeriesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if tensorboard_time_series is not None: - request.tensorboard_time_series = tensorboard_time_series - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_tensorboard_time_series] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_tensorboard_time_series(self, - request: Union[tensorboard_service.GetTensorboardTimeSeriesRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_time_series.TensorboardTimeSeries: - r"""Gets a TensorboardTimeSeries. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetTensorboardTimeSeriesRequest, dict]): - The request object. 
Request message for - [TensorboardService.GetTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardTimeSeries]. - name (str): - Required. The name of the TensorboardTimeSeries - resource. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries: - TensorboardTimeSeries maps to times - series produced in training runs - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.GetTensorboardTimeSeriesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.GetTensorboardTimeSeriesRequest): - request = tensorboard_service.GetTensorboardTimeSeriesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.get_tensorboard_time_series] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_tensorboard_time_series(self, - request: Union[tensorboard_service.UpdateTensorboardTimeSeriesRequest, dict] = None, - *, - tensorboard_time_series: gca_tensorboard_time_series.TensorboardTimeSeries = None, - update_mask: field_mask_pb2.FieldMask = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_tensorboard_time_series.TensorboardTimeSeries: - r"""Updates a TensorboardTimeSeries. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.UpdateTensorboardTimeSeriesRequest, dict]): - The request object. Request message for - [TensorboardService.UpdateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardTimeSeries]. - tensorboard_time_series (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries): - Required. The TensorboardTimeSeries' ``name`` field is - used to identify the TensorboardTimeSeries to be - updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``tensorboard_time_series`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. Field mask is used to specify the fields to be - overwritten in the TensorboardTimeSeries resource by the - update. The fields specified in the update_mask are - relative to the resource, not the full request. 
A field - will be overwritten if it is in the mask. If the user - does not provide a mask then all fields will be - overwritten if new values are specified. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries: - TensorboardTimeSeries maps to times - series produced in training runs - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_time_series, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.UpdateTensorboardTimeSeriesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.UpdateTensorboardTimeSeriesRequest): - request = tensorboard_service.UpdateTensorboardTimeSeriesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_time_series is not None: - request.tensorboard_time_series = tensorboard_time_series - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.update_tensorboard_time_series] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_time_series.name", request.tensorboard_time_series.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_tensorboard_time_series(self, - request: Union[tensorboard_service.ListTensorboardTimeSeriesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTensorboardTimeSeriesPager: - r"""Lists TensorboardTimeSeries in a Location. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest, dict]): - The request object. Request message for - [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]. - parent (str): - Required. The resource name of the - TensorboardRun to list - TensorboardTimeSeries. Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}' - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardTimeSeriesPager: - Response message for - [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.ListTensorboardTimeSeriesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.ListTensorboardTimeSeriesRequest): - request = tensorboard_service.ListTensorboardTimeSeriesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_tensorboard_time_series] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. 
- response = pagers.ListTensorboardTimeSeriesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_tensorboard_time_series(self, - request: Union[tensorboard_service.DeleteTensorboardTimeSeriesRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gac_operation.Operation: - r"""Deletes a TensorboardTimeSeries. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTensorboardTimeSeriesRequest, dict]): - The request object. Request message for - [TensorboardService.DeleteTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardTimeSeries]. - name (str): - Required. The name of the TensorboardTimeSeries to be - deleted. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated - empty messages in your APIs. A typical example is to - use it as the request or the response type of an API - method. For instance: - - service Foo { - rpc Bar(google.protobuf.Empty) returns - (google.protobuf.Empty); - - } - - The JSON representation for Empty is empty JSON - object {}. 
- - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.DeleteTensorboardTimeSeriesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.DeleteTensorboardTimeSeriesRequest): - request = tensorboard_service.DeleteTensorboardTimeSeriesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_tensorboard_time_series] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = gac_operation.from_gapic( - response, - self._transport.operations_client, - empty_pb2.Empty, - metadata_type=gca_operation.DeleteOperationMetadata, - ) - - # Done; return the response. 
- return response - - def batch_read_tensorboard_time_series_data(self, - request: Union[tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest, dict] = None, - *, - tensorboard: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse: - r"""Reads multiple TensorboardTimeSeries' data. The data - point number limit is 1000 for scalars, 100 for tensors - and blob references. If the number of data points stored - is less than the limit, all data will be returned. - Otherwise, that limit number of data points will be - randomly selected from this time series and returned. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataRequest, dict]): - The request object. Request message for - [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.BatchReadTensorboardTimeSeriesData]. - tensorboard (str): - Required. The resource name of the Tensorboard - containing TensorboardTimeSeries to read data from. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}``. - The TensorboardTimeSeries referenced by - [time_series][google.cloud.aiplatform.v1beta1.BatchReadTensorboardTimeSeriesDataRequest.time_series] - must be sub resources of this Tensorboard. - - This corresponds to the ``tensorboard`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataResponse: - Response message for - [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.BatchReadTensorboardTimeSeriesData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest): - request = tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard is not None: - request.tensorboard = tensorboard - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.batch_read_tensorboard_time_series_data] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard", request.tensorboard), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def read_tensorboard_time_series_data(self, - request: Union[tensorboard_service.ReadTensorboardTimeSeriesDataRequest, dict] = None, - *, - tensorboard_time_series: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_service.ReadTensorboardTimeSeriesDataResponse: - r"""Reads a TensorboardTimeSeries' data. By default, if the number - of data points stored is less than 1000, all data will be - returned. Otherwise, 1000 data points will be randomly selected - from this time series and returned. This value can be changed by - changing max_data_points, which can't be greater than 10k. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataRequest, dict]): - The request object. Request message for - [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. - tensorboard_time_series (str): - Required. The resource name of the TensorboardTimeSeries - to read data from. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``tensorboard_time_series`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataResponse: - Response message for - [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_time_series]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.ReadTensorboardTimeSeriesDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.ReadTensorboardTimeSeriesDataRequest): - request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_time_series is not None: - request.tensorboard_time_series = tensorboard_time_series - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.read_tensorboard_time_series_data] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_time_series", request.tensorboard_time_series), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def read_tensorboard_blob_data(self, - request: Union[tensorboard_service.ReadTensorboardBlobDataRequest, dict] = None, - *, - time_series: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> Iterable[tensorboard_service.ReadTensorboardBlobDataResponse]: - r"""Gets bytes of TensorboardBlobs. 
- This is to allow reading blob data stored in consumer - project's Cloud Storage bucket without users having to - obtain Cloud Storage access permission. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataRequest, dict]): - The request object. Request message for - [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData]. - time_series (str): - Required. The resource name of the TensorboardTimeSeries - to list Blobs. Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}' - - This corresponds to the ``time_series`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - Iterable[google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataResponse]: - Response message for - [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([time_series]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.ReadTensorboardBlobDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, tensorboard_service.ReadTensorboardBlobDataRequest): - request = tensorboard_service.ReadTensorboardBlobDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if time_series is not None: - request.time_series = time_series - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.read_tensorboard_blob_data] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("time_series", request.time_series), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def write_tensorboard_experiment_data(self, - request: Union[tensorboard_service.WriteTensorboardExperimentDataRequest, dict] = None, - *, - tensorboard_experiment: str = None, - write_run_data_requests: Sequence[tensorboard_service.WriteTensorboardRunDataRequest] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_service.WriteTensorboardExperimentDataResponse: - r"""Write time series data points of multiple - TensorboardTimeSeries in multiple TensorboardRun's. If - any data fail to be ingested, an error will be returned. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataRequest, dict]): - The request object. Request message for - [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData]. - tensorboard_experiment (str): - Required. The resource name of the TensorboardExperiment - to write data to. 
Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - - This corresponds to the ``tensorboard_experiment`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - write_run_data_requests (Sequence[google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest]): - Required. Requests containing per-run - TensorboardTimeSeries data to write. - - This corresponds to the ``write_run_data_requests`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataResponse: - Response message for - [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_experiment, write_run_data_requests]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.WriteTensorboardExperimentDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, tensorboard_service.WriteTensorboardExperimentDataRequest): - request = tensorboard_service.WriteTensorboardExperimentDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_experiment is not None: - request.tensorboard_experiment = tensorboard_experiment - if write_run_data_requests is not None: - request.write_run_data_requests = write_run_data_requests - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.write_tensorboard_experiment_data] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_experiment", request.tensorboard_experiment), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def write_tensorboard_run_data(self, - request: Union[tensorboard_service.WriteTensorboardRunDataRequest, dict] = None, - *, - tensorboard_run: str = None, - time_series_data: Sequence[tensorboard_data.TimeSeriesData] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> tensorboard_service.WriteTensorboardRunDataResponse: - r"""Write time series data points into multiple - TensorboardTimeSeries under a TensorboardRun. If any - data fail to be ingested, an error will be returned. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest, dict]): - The request object. Request message for - [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData]. - tensorboard_run (str): - Required. The resource name of the TensorboardRun to - write data to. 
Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - - This corresponds to the ``tensorboard_run`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - time_series_data (Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesData]): - Required. The TensorboardTimeSeries - data to write. Values with in a time - series are indexed by their step value. - Repeated writes to the same step will - overwrite the existing value for that - step. - The upper limit of data points per write - request is 5000. - - This corresponds to the ``time_series_data`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataResponse: - Response message for - [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_run, time_series_data]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.WriteTensorboardRunDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, tensorboard_service.WriteTensorboardRunDataRequest): - request = tensorboard_service.WriteTensorboardRunDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if tensorboard_run is not None: - request.tensorboard_run = tensorboard_run - if time_series_data is not None: - request.time_series_data = time_series_data - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.write_tensorboard_run_data] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_run", request.tensorboard_run), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def export_tensorboard_time_series_data(self, - request: Union[tensorboard_service.ExportTensorboardTimeSeriesDataRequest, dict] = None, - *, - tensorboard_time_series: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ExportTensorboardTimeSeriesDataPager: - r"""Exports a TensorboardTimeSeries' data. Data is - returned in paginated responses. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest, dict]): - The request object. Request message for - [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. - tensorboard_time_series (str): - Required. The resource name of the TensorboardTimeSeries - to export data from. 
Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - - This corresponds to the ``tensorboard_time_series`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ExportTensorboardTimeSeriesDataPager: - Response message for - [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([tensorboard_time_series]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a tensorboard_service.ExportTensorboardTimeSeriesDataRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, tensorboard_service.ExportTensorboardTimeSeriesDataRequest): - request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if tensorboard_time_series is not None: - request.tensorboard_time_series = tensorboard_time_series - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.export_tensorboard_time_series_data] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("tensorboard_time_series", request.tensorboard_time_series), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ExportTensorboardTimeSeriesDataPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! 
- """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "TensorboardServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py deleted file mode 100644 index 0b6ab9afcf..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/pagers.py +++ /dev/null @@ -1,633 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.aiplatform_v1beta1.types import tensorboard -from google.cloud.aiplatform_v1beta1.types import tensorboard_data -from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment -from google.cloud.aiplatform_v1beta1.types import tensorboard_run -from google.cloud.aiplatform_v1beta1.types import tensorboard_service -from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series - - -class ListTensorboardsPager: - """A pager for iterating through ``list_tensorboards`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``tensorboards`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListTensorboards`` requests and continue to iterate - through the ``tensorboards`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., tensorboard_service.ListTensorboardsResponse], - request: tensorboard_service.ListTensorboardsRequest, - response: tensorboard_service.ListTensorboardsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = tensorboard_service.ListTensorboardsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[tensorboard_service.ListTensorboardsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[tensorboard.Tensorboard]: - for page in self.pages: - yield from page.tensorboards - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTensorboardsAsyncPager: - """A pager for iterating through ``list_tensorboards`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``tensorboards`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListTensorboards`` requests and continue to iterate - through the ``tensorboards`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[tensorboard_service.ListTensorboardsResponse]], - request: tensorboard_service.ListTensorboardsRequest, - response: tensorboard_service.ListTensorboardsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. 
- request (google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListTensorboardsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = tensorboard_service.ListTensorboardsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[tensorboard_service.ListTensorboardsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[tensorboard.Tensorboard]: - async def async_generator(): - async for page in self.pages: - for response in page.tensorboards: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTensorboardExperimentsPager: - """A pager for iterating through ``list_tensorboard_experiments`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``tensorboard_experiments`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListTensorboardExperiments`` requests and continue to iterate - through the ``tensorboard_experiments`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse` - attributes are available on the pager. 
If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., tensorboard_service.ListTensorboardExperimentsResponse], - request: tensorboard_service.ListTensorboardExperimentsRequest, - response: tensorboard_service.ListTensorboardExperimentsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = tensorboard_service.ListTensorboardExperimentsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[tensorboard_service.ListTensorboardExperimentsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[tensorboard_experiment.TensorboardExperiment]: - for page in self.pages: - yield from page.tensorboard_experiments - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTensorboardExperimentsAsyncPager: - """A pager for iterating through ``list_tensorboard_experiments`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``tensorboard_experiments`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListTensorboardExperiments`` requests and continue to iterate - through the ``tensorboard_experiments`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[tensorboard_service.ListTensorboardExperimentsResponse]], - request: tensorboard_service.ListTensorboardExperimentsRequest, - response: tensorboard_service.ListTensorboardExperimentsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = tensorboard_service.ListTensorboardExperimentsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[tensorboard_service.ListTensorboardExperimentsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[tensorboard_experiment.TensorboardExperiment]: - async def async_generator(): - async for page in self.pages: - for response in page.tensorboard_experiments: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTensorboardRunsPager: - """A pager for iterating through ``list_tensorboard_runs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``tensorboard_runs`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListTensorboardRuns`` requests and continue to iterate - through the ``tensorboard_runs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., tensorboard_service.ListTensorboardRunsResponse], - request: tensorboard_service.ListTensorboardRunsRequest, - response: tensorboard_service.ListTensorboardRunsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = tensorboard_service.ListTensorboardRunsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[tensorboard_service.ListTensorboardRunsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[tensorboard_run.TensorboardRun]: - for page in self.pages: - yield from page.tensorboard_runs - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTensorboardRunsAsyncPager: - """A pager for iterating through ``list_tensorboard_runs`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``tensorboard_runs`` field. 
- - If there are more pages, the ``__aiter__`` method will make additional - ``ListTensorboardRuns`` requests and continue to iterate - through the ``tensorboard_runs`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[tensorboard_service.ListTensorboardRunsResponse]], - request: tensorboard_service.ListTensorboardRunsRequest, - response: tensorboard_service.ListTensorboardRunsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = tensorboard_service.ListTensorboardRunsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[tensorboard_service.ListTensorboardRunsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[tensorboard_run.TensorboardRun]: - async def async_generator(): - async for page in self.pages: - for response in page.tensorboard_runs: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTensorboardTimeSeriesPager: - """A pager for iterating through ``list_tensorboard_time_series`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``tensorboard_time_series`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListTensorboardTimeSeries`` requests and continue to iterate - through the ``tensorboard_time_series`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., tensorboard_service.ListTensorboardTimeSeriesResponse], - request: tensorboard_service.ListTensorboardTimeSeriesRequest, - response: tensorboard_service.ListTensorboardTimeSeriesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = tensorboard_service.ListTensorboardTimeSeriesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[tensorboard_service.ListTensorboardTimeSeriesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[tensorboard_time_series.TensorboardTimeSeries]: - for page in self.pages: - yield from page.tensorboard_time_series - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTensorboardTimeSeriesAsyncPager: - """A pager for iterating through ``list_tensorboard_time_series`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``tensorboard_time_series`` field. 
- - If there are more pages, the ``__aiter__`` method will make additional - ``ListTensorboardTimeSeries`` requests and continue to iterate - through the ``tensorboard_time_series`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[tensorboard_service.ListTensorboardTimeSeriesResponse]], - request: tensorboard_service.ListTensorboardTimeSeriesRequest, - response: tensorboard_service.ListTensorboardTimeSeriesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = tensorboard_service.ListTensorboardTimeSeriesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[tensorboard_service.ListTensorboardTimeSeriesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[tensorboard_time_series.TensorboardTimeSeries]: - async def async_generator(): - async for page in self.pages: - for response in page.tensorboard_time_series: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ExportTensorboardTimeSeriesDataPager: - """A pager for iterating through ``export_tensorboard_time_series_data`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse` object, and - provides an ``__iter__`` method to iterate through its - ``time_series_data_points`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ExportTensorboardTimeSeriesData`` requests and continue to iterate - through the ``time_series_data_points`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., tensorboard_service.ExportTensorboardTimeSeriesDataResponse], - request: tensorboard_service.ExportTensorboardTimeSeriesDataRequest, - response: tensorboard_service.ExportTensorboardTimeSeriesDataResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[tensorboard_data.TimeSeriesDataPoint]: - for page in self.pages: - yield from page.time_series_data_points - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ExportTensorboardTimeSeriesDataAsyncPager: - """A pager for iterating through ``export_tensorboard_time_series_data`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``time_series_data_points`` field. 
- - If there are more pages, the ``__aiter__`` method will make additional - ``ExportTensorboardTimeSeriesData`` requests and continue to iterate - through the ``time_series_data_points`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]], - request: tensorboard_service.ExportTensorboardTimeSeriesDataRequest, - response: tensorboard_service.ExportTensorboardTimeSeriesDataResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[tensorboard_data.TimeSeriesDataPoint]: - async def async_generator(): - async for page in self.pages: - for response in page.time_series_data_points: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/__init__.py deleted file mode 100644 index 9565b55932..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -from typing import Dict, Type - -from .base import TensorboardServiceTransport -from .grpc import TensorboardServiceGrpcTransport -from .grpc_asyncio import TensorboardServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[TensorboardServiceTransport]] -_transport_registry['grpc'] = TensorboardServiceGrpcTransport -_transport_registry['grpc_asyncio'] = TensorboardServiceGrpcAsyncIOTransport - -__all__ = ( - 'TensorboardServiceTransport', - 'TensorboardServiceGrpcTransport', - 'TensorboardServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py deleted file mode 100644 index 62486ac97f..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/base.py +++ /dev/null @@ -1,538 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import tensorboard -from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment -from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment -from google.cloud.aiplatform_v1beta1.types import tensorboard_run -from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run -from google.cloud.aiplatform_v1beta1.types import tensorboard_service -from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series -from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series as gca_tensorboard_time_series -from google.longrunning import operations_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class TensorboardServiceTransport(abc.ABC): - """Abstract transport class for TensorboardService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - 
always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. 
- if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.create_tensorboard: gapic_v1.method.wrap_method( - self.create_tensorboard, - default_timeout=None, - client_info=client_info, - ), - self.get_tensorboard: gapic_v1.method.wrap_method( - self.get_tensorboard, - default_timeout=None, - client_info=client_info, - ), - self.update_tensorboard: gapic_v1.method.wrap_method( - self.update_tensorboard, - default_timeout=None, - client_info=client_info, - ), - self.list_tensorboards: gapic_v1.method.wrap_method( - self.list_tensorboards, - default_timeout=None, - client_info=client_info, - ), - self.delete_tensorboard: gapic_v1.method.wrap_method( - self.delete_tensorboard, - default_timeout=None, - client_info=client_info, - ), - self.create_tensorboard_experiment: gapic_v1.method.wrap_method( - self.create_tensorboard_experiment, - default_timeout=None, - client_info=client_info, - ), - self.get_tensorboard_experiment: gapic_v1.method.wrap_method( - self.get_tensorboard_experiment, - default_timeout=None, - client_info=client_info, - ), - self.update_tensorboard_experiment: 
gapic_v1.method.wrap_method( - self.update_tensorboard_experiment, - default_timeout=None, - client_info=client_info, - ), - self.list_tensorboard_experiments: gapic_v1.method.wrap_method( - self.list_tensorboard_experiments, - default_timeout=None, - client_info=client_info, - ), - self.delete_tensorboard_experiment: gapic_v1.method.wrap_method( - self.delete_tensorboard_experiment, - default_timeout=None, - client_info=client_info, - ), - self.create_tensorboard_run: gapic_v1.method.wrap_method( - self.create_tensorboard_run, - default_timeout=None, - client_info=client_info, - ), - self.batch_create_tensorboard_runs: gapic_v1.method.wrap_method( - self.batch_create_tensorboard_runs, - default_timeout=None, - client_info=client_info, - ), - self.get_tensorboard_run: gapic_v1.method.wrap_method( - self.get_tensorboard_run, - default_timeout=None, - client_info=client_info, - ), - self.update_tensorboard_run: gapic_v1.method.wrap_method( - self.update_tensorboard_run, - default_timeout=None, - client_info=client_info, - ), - self.list_tensorboard_runs: gapic_v1.method.wrap_method( - self.list_tensorboard_runs, - default_timeout=None, - client_info=client_info, - ), - self.delete_tensorboard_run: gapic_v1.method.wrap_method( - self.delete_tensorboard_run, - default_timeout=None, - client_info=client_info, - ), - self.batch_create_tensorboard_time_series: gapic_v1.method.wrap_method( - self.batch_create_tensorboard_time_series, - default_timeout=None, - client_info=client_info, - ), - self.create_tensorboard_time_series: gapic_v1.method.wrap_method( - self.create_tensorboard_time_series, - default_timeout=None, - client_info=client_info, - ), - self.get_tensorboard_time_series: gapic_v1.method.wrap_method( - self.get_tensorboard_time_series, - default_timeout=None, - client_info=client_info, - ), - self.update_tensorboard_time_series: gapic_v1.method.wrap_method( - self.update_tensorboard_time_series, - default_timeout=None, - client_info=client_info, - ), - 
self.list_tensorboard_time_series: gapic_v1.method.wrap_method( - self.list_tensorboard_time_series, - default_timeout=None, - client_info=client_info, - ), - self.delete_tensorboard_time_series: gapic_v1.method.wrap_method( - self.delete_tensorboard_time_series, - default_timeout=None, - client_info=client_info, - ), - self.batch_read_tensorboard_time_series_data: gapic_v1.method.wrap_method( - self.batch_read_tensorboard_time_series_data, - default_timeout=None, - client_info=client_info, - ), - self.read_tensorboard_time_series_data: gapic_v1.method.wrap_method( - self.read_tensorboard_time_series_data, - default_timeout=None, - client_info=client_info, - ), - self.read_tensorboard_blob_data: gapic_v1.method.wrap_method( - self.read_tensorboard_blob_data, - default_timeout=None, - client_info=client_info, - ), - self.write_tensorboard_experiment_data: gapic_v1.method.wrap_method( - self.write_tensorboard_experiment_data, - default_timeout=None, - client_info=client_info, - ), - self.write_tensorboard_run_data: gapic_v1.method.wrap_method( - self.write_tensorboard_run_data, - default_timeout=None, - client_info=client_info, - ), - self.export_tensorboard_time_series_data: gapic_v1.method.wrap_method( - self.export_tensorboard_time_series_data, - default_timeout=None, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! 
- """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_tensorboard(self) -> Callable[ - [tensorboard_service.CreateTensorboardRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_tensorboard(self) -> Callable[ - [tensorboard_service.GetTensorboardRequest], - Union[ - tensorboard.Tensorboard, - Awaitable[tensorboard.Tensorboard] - ]]: - raise NotImplementedError() - - @property - def update_tensorboard(self) -> Callable[ - [tensorboard_service.UpdateTensorboardRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def list_tensorboards(self) -> Callable[ - [tensorboard_service.ListTensorboardsRequest], - Union[ - tensorboard_service.ListTensorboardsResponse, - Awaitable[tensorboard_service.ListTensorboardsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_tensorboard(self) -> Callable[ - [tensorboard_service.DeleteTensorboardRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def create_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.CreateTensorboardExperimentRequest], - Union[ - gca_tensorboard_experiment.TensorboardExperiment, - Awaitable[gca_tensorboard_experiment.TensorboardExperiment] - ]]: - raise NotImplementedError() - - @property - def get_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.GetTensorboardExperimentRequest], - Union[ - tensorboard_experiment.TensorboardExperiment, - Awaitable[tensorboard_experiment.TensorboardExperiment] - ]]: - raise NotImplementedError() - - @property - def update_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.UpdateTensorboardExperimentRequest], - 
Union[ - gca_tensorboard_experiment.TensorboardExperiment, - Awaitable[gca_tensorboard_experiment.TensorboardExperiment] - ]]: - raise NotImplementedError() - - @property - def list_tensorboard_experiments(self) -> Callable[ - [tensorboard_service.ListTensorboardExperimentsRequest], - Union[ - tensorboard_service.ListTensorboardExperimentsResponse, - Awaitable[tensorboard_service.ListTensorboardExperimentsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.DeleteTensorboardExperimentRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def create_tensorboard_run(self) -> Callable[ - [tensorboard_service.CreateTensorboardRunRequest], - Union[ - gca_tensorboard_run.TensorboardRun, - Awaitable[gca_tensorboard_run.TensorboardRun] - ]]: - raise NotImplementedError() - - @property - def batch_create_tensorboard_runs(self) -> Callable[ - [tensorboard_service.BatchCreateTensorboardRunsRequest], - Union[ - tensorboard_service.BatchCreateTensorboardRunsResponse, - Awaitable[tensorboard_service.BatchCreateTensorboardRunsResponse] - ]]: - raise NotImplementedError() - - @property - def get_tensorboard_run(self) -> Callable[ - [tensorboard_service.GetTensorboardRunRequest], - Union[ - tensorboard_run.TensorboardRun, - Awaitable[tensorboard_run.TensorboardRun] - ]]: - raise NotImplementedError() - - @property - def update_tensorboard_run(self) -> Callable[ - [tensorboard_service.UpdateTensorboardRunRequest], - Union[ - gca_tensorboard_run.TensorboardRun, - Awaitable[gca_tensorboard_run.TensorboardRun] - ]]: - raise NotImplementedError() - - @property - def list_tensorboard_runs(self) -> Callable[ - [tensorboard_service.ListTensorboardRunsRequest], - Union[ - tensorboard_service.ListTensorboardRunsResponse, - Awaitable[tensorboard_service.ListTensorboardRunsResponse] - ]]: - raise NotImplementedError() - - 
@property - def delete_tensorboard_run(self) -> Callable[ - [tensorboard_service.DeleteTensorboardRunRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def batch_create_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.BatchCreateTensorboardTimeSeriesRequest], - Union[ - tensorboard_service.BatchCreateTensorboardTimeSeriesResponse, - Awaitable[tensorboard_service.BatchCreateTensorboardTimeSeriesResponse] - ]]: - raise NotImplementedError() - - @property - def create_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.CreateTensorboardTimeSeriesRequest], - Union[ - gca_tensorboard_time_series.TensorboardTimeSeries, - Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries] - ]]: - raise NotImplementedError() - - @property - def get_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.GetTensorboardTimeSeriesRequest], - Union[ - tensorboard_time_series.TensorboardTimeSeries, - Awaitable[tensorboard_time_series.TensorboardTimeSeries] - ]]: - raise NotImplementedError() - - @property - def update_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.UpdateTensorboardTimeSeriesRequest], - Union[ - gca_tensorboard_time_series.TensorboardTimeSeries, - Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries] - ]]: - raise NotImplementedError() - - @property - def list_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.ListTensorboardTimeSeriesRequest], - Union[ - tensorboard_service.ListTensorboardTimeSeriesResponse, - Awaitable[tensorboard_service.ListTensorboardTimeSeriesResponse] - ]]: - raise NotImplementedError() - - @property - def delete_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.DeleteTensorboardTimeSeriesRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def 
batch_read_tensorboard_time_series_data(self) -> Callable[ - [tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest], - Union[ - tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse, - Awaitable[tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse] - ]]: - raise NotImplementedError() - - @property - def read_tensorboard_time_series_data(self) -> Callable[ - [tensorboard_service.ReadTensorboardTimeSeriesDataRequest], - Union[ - tensorboard_service.ReadTensorboardTimeSeriesDataResponse, - Awaitable[tensorboard_service.ReadTensorboardTimeSeriesDataResponse] - ]]: - raise NotImplementedError() - - @property - def read_tensorboard_blob_data(self) -> Callable[ - [tensorboard_service.ReadTensorboardBlobDataRequest], - Union[ - tensorboard_service.ReadTensorboardBlobDataResponse, - Awaitable[tensorboard_service.ReadTensorboardBlobDataResponse] - ]]: - raise NotImplementedError() - - @property - def write_tensorboard_experiment_data(self) -> Callable[ - [tensorboard_service.WriteTensorboardExperimentDataRequest], - Union[ - tensorboard_service.WriteTensorboardExperimentDataResponse, - Awaitable[tensorboard_service.WriteTensorboardExperimentDataResponse] - ]]: - raise NotImplementedError() - - @property - def write_tensorboard_run_data(self) -> Callable[ - [tensorboard_service.WriteTensorboardRunDataRequest], - Union[ - tensorboard_service.WriteTensorboardRunDataResponse, - Awaitable[tensorboard_service.WriteTensorboardRunDataResponse] - ]]: - raise NotImplementedError() - - @property - def export_tensorboard_time_series_data(self) -> Callable[ - [tensorboard_service.ExportTensorboardTimeSeriesDataRequest], - Union[ - tensorboard_service.ExportTensorboardTimeSeriesDataResponse, - Awaitable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'TensorboardServiceTransport', -) diff --git 
a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py deleted file mode 100644 index d51cf78cb4..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc.py +++ /dev/null @@ -1,1005 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1beta1.types import tensorboard -from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment -from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment -from google.cloud.aiplatform_v1beta1.types import tensorboard_run -from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run -from google.cloud.aiplatform_v1beta1.types import tensorboard_service -from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series -from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series as gca_tensorboard_time_series -from google.longrunning import operations_pb2 # type: ignore -from .base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO - - -class TensorboardServiceGrpcTransport(TensorboardServiceTransport): - """gRPC backend transport for TensorboardService. - - TensorboardService - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. 
- ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_tensorboard(self) -> Callable[ - [tensorboard_service.CreateTensorboardRequest], - operations_pb2.Operation]: - r"""Return a callable for the create tensorboard method over gRPC. - - Creates a Tensorboard. 
- - Returns: - Callable[[~.CreateTensorboardRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_tensorboard' not in self._stubs: - self._stubs['create_tensorboard'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboard', - request_serializer=tensorboard_service.CreateTensorboardRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_tensorboard'] - - @property - def get_tensorboard(self) -> Callable[ - [tensorboard_service.GetTensorboardRequest], - tensorboard.Tensorboard]: - r"""Return a callable for the get tensorboard method over gRPC. - - Gets a Tensorboard. - - Returns: - Callable[[~.GetTensorboardRequest], - ~.Tensorboard]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_tensorboard' not in self._stubs: - self._stubs['get_tensorboard'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboard', - request_serializer=tensorboard_service.GetTensorboardRequest.serialize, - response_deserializer=tensorboard.Tensorboard.deserialize, - ) - return self._stubs['get_tensorboard'] - - @property - def update_tensorboard(self) -> Callable[ - [tensorboard_service.UpdateTensorboardRequest], - operations_pb2.Operation]: - r"""Return a callable for the update tensorboard method over gRPC. - - Updates a Tensorboard. 
- - Returns: - Callable[[~.UpdateTensorboardRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_tensorboard' not in self._stubs: - self._stubs['update_tensorboard'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboard', - request_serializer=tensorboard_service.UpdateTensorboardRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_tensorboard'] - - @property - def list_tensorboards(self) -> Callable[ - [tensorboard_service.ListTensorboardsRequest], - tensorboard_service.ListTensorboardsResponse]: - r"""Return a callable for the list tensorboards method over gRPC. - - Lists Tensorboards in a Location. - - Returns: - Callable[[~.ListTensorboardsRequest], - ~.ListTensorboardsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_tensorboards' not in self._stubs: - self._stubs['list_tensorboards'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboards', - request_serializer=tensorboard_service.ListTensorboardsRequest.serialize, - response_deserializer=tensorboard_service.ListTensorboardsResponse.deserialize, - ) - return self._stubs['list_tensorboards'] - - @property - def delete_tensorboard(self) -> Callable[ - [tensorboard_service.DeleteTensorboardRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete tensorboard method over gRPC. - - Deletes a Tensorboard. 
- - Returns: - Callable[[~.DeleteTensorboardRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_tensorboard' not in self._stubs: - self._stubs['delete_tensorboard'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboard', - request_serializer=tensorboard_service.DeleteTensorboardRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_tensorboard'] - - @property - def create_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.CreateTensorboardExperimentRequest], - gca_tensorboard_experiment.TensorboardExperiment]: - r"""Return a callable for the create tensorboard experiment method over gRPC. - - Creates a TensorboardExperiment. - - Returns: - Callable[[~.CreateTensorboardExperimentRequest], - ~.TensorboardExperiment]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_tensorboard_experiment' not in self._stubs: - self._stubs['create_tensorboard_experiment'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardExperiment', - request_serializer=tensorboard_service.CreateTensorboardExperimentRequest.serialize, - response_deserializer=gca_tensorboard_experiment.TensorboardExperiment.deserialize, - ) - return self._stubs['create_tensorboard_experiment'] - - @property - def get_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.GetTensorboardExperimentRequest], - tensorboard_experiment.TensorboardExperiment]: - r"""Return a callable for the get tensorboard experiment method over gRPC. - - Gets a TensorboardExperiment. - - Returns: - Callable[[~.GetTensorboardExperimentRequest], - ~.TensorboardExperiment]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_tensorboard_experiment' not in self._stubs: - self._stubs['get_tensorboard_experiment'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardExperiment', - request_serializer=tensorboard_service.GetTensorboardExperimentRequest.serialize, - response_deserializer=tensorboard_experiment.TensorboardExperiment.deserialize, - ) - return self._stubs['get_tensorboard_experiment'] - - @property - def update_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.UpdateTensorboardExperimentRequest], - gca_tensorboard_experiment.TensorboardExperiment]: - r"""Return a callable for the update tensorboard experiment method over gRPC. - - Updates a TensorboardExperiment. 
- - Returns: - Callable[[~.UpdateTensorboardExperimentRequest], - ~.TensorboardExperiment]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_tensorboard_experiment' not in self._stubs: - self._stubs['update_tensorboard_experiment'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardExperiment', - request_serializer=tensorboard_service.UpdateTensorboardExperimentRequest.serialize, - response_deserializer=gca_tensorboard_experiment.TensorboardExperiment.deserialize, - ) - return self._stubs['update_tensorboard_experiment'] - - @property - def list_tensorboard_experiments(self) -> Callable[ - [tensorboard_service.ListTensorboardExperimentsRequest], - tensorboard_service.ListTensorboardExperimentsResponse]: - r"""Return a callable for the list tensorboard experiments method over gRPC. - - Lists TensorboardExperiments in a Location. - - Returns: - Callable[[~.ListTensorboardExperimentsRequest], - ~.ListTensorboardExperimentsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_tensorboard_experiments' not in self._stubs: - self._stubs['list_tensorboard_experiments'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardExperiments', - request_serializer=tensorboard_service.ListTensorboardExperimentsRequest.serialize, - response_deserializer=tensorboard_service.ListTensorboardExperimentsResponse.deserialize, - ) - return self._stubs['list_tensorboard_experiments'] - - @property - def delete_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.DeleteTensorboardExperimentRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete tensorboard experiment method over gRPC. - - Deletes a TensorboardExperiment. - - Returns: - Callable[[~.DeleteTensorboardExperimentRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_tensorboard_experiment' not in self._stubs: - self._stubs['delete_tensorboard_experiment'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardExperiment', - request_serializer=tensorboard_service.DeleteTensorboardExperimentRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_tensorboard_experiment'] - - @property - def create_tensorboard_run(self) -> Callable[ - [tensorboard_service.CreateTensorboardRunRequest], - gca_tensorboard_run.TensorboardRun]: - r"""Return a callable for the create tensorboard run method over gRPC. - - Creates a TensorboardRun. - - Returns: - Callable[[~.CreateTensorboardRunRequest], - ~.TensorboardRun]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_tensorboard_run' not in self._stubs: - self._stubs['create_tensorboard_run'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardRun', - request_serializer=tensorboard_service.CreateTensorboardRunRequest.serialize, - response_deserializer=gca_tensorboard_run.TensorboardRun.deserialize, - ) - return self._stubs['create_tensorboard_run'] - - @property - def batch_create_tensorboard_runs(self) -> Callable[ - [tensorboard_service.BatchCreateTensorboardRunsRequest], - tensorboard_service.BatchCreateTensorboardRunsResponse]: - r"""Return a callable for the batch create tensorboard runs method over gRPC. - - Batch create TensorboardRuns. - - Returns: - Callable[[~.BatchCreateTensorboardRunsRequest], - ~.BatchCreateTensorboardRunsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_create_tensorboard_runs' not in self._stubs: - self._stubs['batch_create_tensorboard_runs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/BatchCreateTensorboardRuns', - request_serializer=tensorboard_service.BatchCreateTensorboardRunsRequest.serialize, - response_deserializer=tensorboard_service.BatchCreateTensorboardRunsResponse.deserialize, - ) - return self._stubs['batch_create_tensorboard_runs'] - - @property - def get_tensorboard_run(self) -> Callable[ - [tensorboard_service.GetTensorboardRunRequest], - tensorboard_run.TensorboardRun]: - r"""Return a callable for the get tensorboard run method over gRPC. - - Gets a TensorboardRun. 
- - Returns: - Callable[[~.GetTensorboardRunRequest], - ~.TensorboardRun]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_tensorboard_run' not in self._stubs: - self._stubs['get_tensorboard_run'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardRun', - request_serializer=tensorboard_service.GetTensorboardRunRequest.serialize, - response_deserializer=tensorboard_run.TensorboardRun.deserialize, - ) - return self._stubs['get_tensorboard_run'] - - @property - def update_tensorboard_run(self) -> Callable[ - [tensorboard_service.UpdateTensorboardRunRequest], - gca_tensorboard_run.TensorboardRun]: - r"""Return a callable for the update tensorboard run method over gRPC. - - Updates a TensorboardRun. - - Returns: - Callable[[~.UpdateTensorboardRunRequest], - ~.TensorboardRun]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_tensorboard_run' not in self._stubs: - self._stubs['update_tensorboard_run'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardRun', - request_serializer=tensorboard_service.UpdateTensorboardRunRequest.serialize, - response_deserializer=gca_tensorboard_run.TensorboardRun.deserialize, - ) - return self._stubs['update_tensorboard_run'] - - @property - def list_tensorboard_runs(self) -> Callable[ - [tensorboard_service.ListTensorboardRunsRequest], - tensorboard_service.ListTensorboardRunsResponse]: - r"""Return a callable for the list tensorboard runs method over gRPC. 
- - Lists TensorboardRuns in a Location. - - Returns: - Callable[[~.ListTensorboardRunsRequest], - ~.ListTensorboardRunsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_tensorboard_runs' not in self._stubs: - self._stubs['list_tensorboard_runs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardRuns', - request_serializer=tensorboard_service.ListTensorboardRunsRequest.serialize, - response_deserializer=tensorboard_service.ListTensorboardRunsResponse.deserialize, - ) - return self._stubs['list_tensorboard_runs'] - - @property - def delete_tensorboard_run(self) -> Callable[ - [tensorboard_service.DeleteTensorboardRunRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete tensorboard run method over gRPC. - - Deletes a TensorboardRun. - - Returns: - Callable[[~.DeleteTensorboardRunRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_tensorboard_run' not in self._stubs: - self._stubs['delete_tensorboard_run'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardRun', - request_serializer=tensorboard_service.DeleteTensorboardRunRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_tensorboard_run'] - - @property - def batch_create_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.BatchCreateTensorboardTimeSeriesRequest], - tensorboard_service.BatchCreateTensorboardTimeSeriesResponse]: - r"""Return a callable for the batch create tensorboard time - series method over gRPC. - - Batch create TensorboardTimeSeries that belong to a - TensorboardExperiment. - - Returns: - Callable[[~.BatchCreateTensorboardTimeSeriesRequest], - ~.BatchCreateTensorboardTimeSeriesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_create_tensorboard_time_series' not in self._stubs: - self._stubs['batch_create_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/BatchCreateTensorboardTimeSeries', - request_serializer=tensorboard_service.BatchCreateTensorboardTimeSeriesRequest.serialize, - response_deserializer=tensorboard_service.BatchCreateTensorboardTimeSeriesResponse.deserialize, - ) - return self._stubs['batch_create_tensorboard_time_series'] - - @property - def create_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.CreateTensorboardTimeSeriesRequest], - gca_tensorboard_time_series.TensorboardTimeSeries]: - r"""Return a callable for the create tensorboard time series method over gRPC. - - Creates a TensorboardTimeSeries. 
- - Returns: - Callable[[~.CreateTensorboardTimeSeriesRequest], - ~.TensorboardTimeSeries]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_tensorboard_time_series' not in self._stubs: - self._stubs['create_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardTimeSeries', - request_serializer=tensorboard_service.CreateTensorboardTimeSeriesRequest.serialize, - response_deserializer=gca_tensorboard_time_series.TensorboardTimeSeries.deserialize, - ) - return self._stubs['create_tensorboard_time_series'] - - @property - def get_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.GetTensorboardTimeSeriesRequest], - tensorboard_time_series.TensorboardTimeSeries]: - r"""Return a callable for the get tensorboard time series method over gRPC. - - Gets a TensorboardTimeSeries. - - Returns: - Callable[[~.GetTensorboardTimeSeriesRequest], - ~.TensorboardTimeSeries]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_tensorboard_time_series' not in self._stubs: - self._stubs['get_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardTimeSeries', - request_serializer=tensorboard_service.GetTensorboardTimeSeriesRequest.serialize, - response_deserializer=tensorboard_time_series.TensorboardTimeSeries.deserialize, - ) - return self._stubs['get_tensorboard_time_series'] - - @property - def update_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.UpdateTensorboardTimeSeriesRequest], - gca_tensorboard_time_series.TensorboardTimeSeries]: - r"""Return a callable for the update tensorboard time series method over gRPC. - - Updates a TensorboardTimeSeries. - - Returns: - Callable[[~.UpdateTensorboardTimeSeriesRequest], - ~.TensorboardTimeSeries]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_tensorboard_time_series' not in self._stubs: - self._stubs['update_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardTimeSeries', - request_serializer=tensorboard_service.UpdateTensorboardTimeSeriesRequest.serialize, - response_deserializer=gca_tensorboard_time_series.TensorboardTimeSeries.deserialize, - ) - return self._stubs['update_tensorboard_time_series'] - - @property - def list_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.ListTensorboardTimeSeriesRequest], - tensorboard_service.ListTensorboardTimeSeriesResponse]: - r"""Return a callable for the list tensorboard time series method over gRPC. - - Lists TensorboardTimeSeries in a Location. 
- - Returns: - Callable[[~.ListTensorboardTimeSeriesRequest], - ~.ListTensorboardTimeSeriesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_tensorboard_time_series' not in self._stubs: - self._stubs['list_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardTimeSeries', - request_serializer=tensorboard_service.ListTensorboardTimeSeriesRequest.serialize, - response_deserializer=tensorboard_service.ListTensorboardTimeSeriesResponse.deserialize, - ) - return self._stubs['list_tensorboard_time_series'] - - @property - def delete_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.DeleteTensorboardTimeSeriesRequest], - operations_pb2.Operation]: - r"""Return a callable for the delete tensorboard time series method over gRPC. - - Deletes a TensorboardTimeSeries. - - Returns: - Callable[[~.DeleteTensorboardTimeSeriesRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_tensorboard_time_series' not in self._stubs: - self._stubs['delete_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardTimeSeries', - request_serializer=tensorboard_service.DeleteTensorboardTimeSeriesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_tensorboard_time_series'] - - @property - def batch_read_tensorboard_time_series_data(self) -> Callable[ - [tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest], - tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse]: - r"""Return a callable for the batch read tensorboard time - series data method over gRPC. - - Reads multiple TensorboardTimeSeries' data. The data - point number limit is 1000 for scalars, 100 for tensors - and blob references. If the number of data points stored - is less than the limit, all data will be returned. - Otherwise, that limit number of data points will be - randomly selected from this time series and returned. - - Returns: - Callable[[~.BatchReadTensorboardTimeSeriesDataRequest], - ~.BatchReadTensorboardTimeSeriesDataResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'batch_read_tensorboard_time_series_data' not in self._stubs: - self._stubs['batch_read_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/BatchReadTensorboardTimeSeriesData', - request_serializer=tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest.serialize, - response_deserializer=tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse.deserialize, - ) - return self._stubs['batch_read_tensorboard_time_series_data'] - - @property - def read_tensorboard_time_series_data(self) -> Callable[ - [tensorboard_service.ReadTensorboardTimeSeriesDataRequest], - tensorboard_service.ReadTensorboardTimeSeriesDataResponse]: - r"""Return a callable for the read tensorboard time series - data method over gRPC. - - Reads a TensorboardTimeSeries' data. By default, if the number - of data points stored is less than 1000, all data will be - returned. Otherwise, 1000 data points will be randomly selected - from this time series and returned. This value can be changed by - changing max_data_points, which can't be greater than 10k. - - Returns: - Callable[[~.ReadTensorboardTimeSeriesDataRequest], - ~.ReadTensorboardTimeSeriesDataResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'read_tensorboard_time_series_data' not in self._stubs: - self._stubs['read_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ReadTensorboardTimeSeriesData', - request_serializer=tensorboard_service.ReadTensorboardTimeSeriesDataRequest.serialize, - response_deserializer=tensorboard_service.ReadTensorboardTimeSeriesDataResponse.deserialize, - ) - return self._stubs['read_tensorboard_time_series_data'] - - @property - def read_tensorboard_blob_data(self) -> Callable[ - [tensorboard_service.ReadTensorboardBlobDataRequest], - tensorboard_service.ReadTensorboardBlobDataResponse]: - r"""Return a callable for the read tensorboard blob data method over gRPC. - - Gets bytes of TensorboardBlobs. - This is to allow reading blob data stored in consumer - project's Cloud Storage bucket without users having to - obtain Cloud Storage access permission. - - Returns: - Callable[[~.ReadTensorboardBlobDataRequest], - ~.ReadTensorboardBlobDataResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'read_tensorboard_blob_data' not in self._stubs: - self._stubs['read_tensorboard_blob_data'] = self.grpc_channel.unary_stream( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ReadTensorboardBlobData', - request_serializer=tensorboard_service.ReadTensorboardBlobDataRequest.serialize, - response_deserializer=tensorboard_service.ReadTensorboardBlobDataResponse.deserialize, - ) - return self._stubs['read_tensorboard_blob_data'] - - @property - def write_tensorboard_experiment_data(self) -> Callable[ - [tensorboard_service.WriteTensorboardExperimentDataRequest], - tensorboard_service.WriteTensorboardExperimentDataResponse]: - r"""Return a callable for the write tensorboard experiment - data method over gRPC. - - Write time series data points of multiple - TensorboardTimeSeries in multiple TensorboardRun's. If - any data fail to be ingested, an error will be returned. - - Returns: - Callable[[~.WriteTensorboardExperimentDataRequest], - ~.WriteTensorboardExperimentDataResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'write_tensorboard_experiment_data' not in self._stubs: - self._stubs['write_tensorboard_experiment_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/WriteTensorboardExperimentData', - request_serializer=tensorboard_service.WriteTensorboardExperimentDataRequest.serialize, - response_deserializer=tensorboard_service.WriteTensorboardExperimentDataResponse.deserialize, - ) - return self._stubs['write_tensorboard_experiment_data'] - - @property - def write_tensorboard_run_data(self) -> Callable[ - [tensorboard_service.WriteTensorboardRunDataRequest], - tensorboard_service.WriteTensorboardRunDataResponse]: - r"""Return a callable for the write tensorboard run data method over gRPC. - - Write time series data points into multiple - TensorboardTimeSeries under a TensorboardRun. If any - data fail to be ingested, an error will be returned. - - Returns: - Callable[[~.WriteTensorboardRunDataRequest], - ~.WriteTensorboardRunDataResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'write_tensorboard_run_data' not in self._stubs: - self._stubs['write_tensorboard_run_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/WriteTensorboardRunData', - request_serializer=tensorboard_service.WriteTensorboardRunDataRequest.serialize, - response_deserializer=tensorboard_service.WriteTensorboardRunDataResponse.deserialize, - ) - return self._stubs['write_tensorboard_run_data'] - - @property - def export_tensorboard_time_series_data(self) -> Callable[ - [tensorboard_service.ExportTensorboardTimeSeriesDataRequest], - tensorboard_service.ExportTensorboardTimeSeriesDataResponse]: - r"""Return a callable for the export tensorboard time series - data method over gRPC. 
- - Exports a TensorboardTimeSeries' data. Data is - returned in paginated responses. - - Returns: - Callable[[~.ExportTensorboardTimeSeriesDataRequest], - ~.ExportTensorboardTimeSeriesDataResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'export_tensorboard_time_series_data' not in self._stubs: - self._stubs['export_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ExportTensorboardTimeSeriesData', - request_serializer=tensorboard_service.ExportTensorboardTimeSeriesDataRequest.serialize, - response_deserializer=tensorboard_service.ExportTensorboardTimeSeriesDataResponse.deserialize, - ) - return self._stubs['export_tensorboard_time_series_data'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'TensorboardServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py deleted file mode 100644 index 8e89461fd9..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,1009 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import tensorboard -from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment -from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment -from google.cloud.aiplatform_v1beta1.types import tensorboard_run -from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run -from google.cloud.aiplatform_v1beta1.types import tensorboard_service -from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series -from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series as gca_tensorboard_time_series -from google.longrunning import operations_pb2 # type: ignore -from .base import TensorboardServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import TensorboardServiceGrpcTransport - - -class TensorboardServiceGrpcAsyncIOTransport(TensorboardServiceTransport): - """gRPC AsyncIO backend transport for TensorboardService. 
- - TensorboardService - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. 
- """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
- If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. 
This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_tensorboard(self) -> Callable[ - [tensorboard_service.CreateTensorboardRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create tensorboard method over gRPC. - - Creates a Tensorboard. - - Returns: - Callable[[~.CreateTensorboardRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_tensorboard' not in self._stubs: - self._stubs['create_tensorboard'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboard', - request_serializer=tensorboard_service.CreateTensorboardRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_tensorboard'] - - @property - def get_tensorboard(self) -> Callable[ - [tensorboard_service.GetTensorboardRequest], - Awaitable[tensorboard.Tensorboard]]: - r"""Return a callable for the get tensorboard method over gRPC. - - Gets a Tensorboard. - - Returns: - Callable[[~.GetTensorboardRequest], - Awaitable[~.Tensorboard]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_tensorboard' not in self._stubs: - self._stubs['get_tensorboard'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboard', - request_serializer=tensorboard_service.GetTensorboardRequest.serialize, - response_deserializer=tensorboard.Tensorboard.deserialize, - ) - return self._stubs['get_tensorboard'] - - @property - def update_tensorboard(self) -> Callable[ - [tensorboard_service.UpdateTensorboardRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the update tensorboard method over gRPC. - - Updates a Tensorboard. - - Returns: - Callable[[~.UpdateTensorboardRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_tensorboard' not in self._stubs: - self._stubs['update_tensorboard'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboard', - request_serializer=tensorboard_service.UpdateTensorboardRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_tensorboard'] - - @property - def list_tensorboards(self) -> Callable[ - [tensorboard_service.ListTensorboardsRequest], - Awaitable[tensorboard_service.ListTensorboardsResponse]]: - r"""Return a callable for the list tensorboards method over gRPC. - - Lists Tensorboards in a Location. - - Returns: - Callable[[~.ListTensorboardsRequest], - Awaitable[~.ListTensorboardsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_tensorboards' not in self._stubs: - self._stubs['list_tensorboards'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboards', - request_serializer=tensorboard_service.ListTensorboardsRequest.serialize, - response_deserializer=tensorboard_service.ListTensorboardsResponse.deserialize, - ) - return self._stubs['list_tensorboards'] - - @property - def delete_tensorboard(self) -> Callable[ - [tensorboard_service.DeleteTensorboardRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete tensorboard method over gRPC. - - Deletes a Tensorboard. - - Returns: - Callable[[~.DeleteTensorboardRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_tensorboard' not in self._stubs: - self._stubs['delete_tensorboard'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboard', - request_serializer=tensorboard_service.DeleteTensorboardRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_tensorboard'] - - @property - def create_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.CreateTensorboardExperimentRequest], - Awaitable[gca_tensorboard_experiment.TensorboardExperiment]]: - r"""Return a callable for the create tensorboard experiment method over gRPC. - - Creates a TensorboardExperiment. - - Returns: - Callable[[~.CreateTensorboardExperimentRequest], - Awaitable[~.TensorboardExperiment]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_tensorboard_experiment' not in self._stubs: - self._stubs['create_tensorboard_experiment'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardExperiment', - request_serializer=tensorboard_service.CreateTensorboardExperimentRequest.serialize, - response_deserializer=gca_tensorboard_experiment.TensorboardExperiment.deserialize, - ) - return self._stubs['create_tensorboard_experiment'] - - @property - def get_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.GetTensorboardExperimentRequest], - Awaitable[tensorboard_experiment.TensorboardExperiment]]: - r"""Return a callable for the get tensorboard experiment method over gRPC. - - Gets a TensorboardExperiment. 
- - Returns: - Callable[[~.GetTensorboardExperimentRequest], - Awaitable[~.TensorboardExperiment]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_tensorboard_experiment' not in self._stubs: - self._stubs['get_tensorboard_experiment'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardExperiment', - request_serializer=tensorboard_service.GetTensorboardExperimentRequest.serialize, - response_deserializer=tensorboard_experiment.TensorboardExperiment.deserialize, - ) - return self._stubs['get_tensorboard_experiment'] - - @property - def update_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.UpdateTensorboardExperimentRequest], - Awaitable[gca_tensorboard_experiment.TensorboardExperiment]]: - r"""Return a callable for the update tensorboard experiment method over gRPC. - - Updates a TensorboardExperiment. - - Returns: - Callable[[~.UpdateTensorboardExperimentRequest], - Awaitable[~.TensorboardExperiment]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_tensorboard_experiment' not in self._stubs: - self._stubs['update_tensorboard_experiment'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardExperiment', - request_serializer=tensorboard_service.UpdateTensorboardExperimentRequest.serialize, - response_deserializer=gca_tensorboard_experiment.TensorboardExperiment.deserialize, - ) - return self._stubs['update_tensorboard_experiment'] - - @property - def list_tensorboard_experiments(self) -> Callable[ - [tensorboard_service.ListTensorboardExperimentsRequest], - Awaitable[tensorboard_service.ListTensorboardExperimentsResponse]]: - r"""Return a callable for the list tensorboard experiments method over gRPC. - - Lists TensorboardExperiments in a Location. - - Returns: - Callable[[~.ListTensorboardExperimentsRequest], - Awaitable[~.ListTensorboardExperimentsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_tensorboard_experiments' not in self._stubs: - self._stubs['list_tensorboard_experiments'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardExperiments', - request_serializer=tensorboard_service.ListTensorboardExperimentsRequest.serialize, - response_deserializer=tensorboard_service.ListTensorboardExperimentsResponse.deserialize, - ) - return self._stubs['list_tensorboard_experiments'] - - @property - def delete_tensorboard_experiment(self) -> Callable[ - [tensorboard_service.DeleteTensorboardExperimentRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete tensorboard experiment method over gRPC. - - Deletes a TensorboardExperiment. 
- - Returns: - Callable[[~.DeleteTensorboardExperimentRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_tensorboard_experiment' not in self._stubs: - self._stubs['delete_tensorboard_experiment'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardExperiment', - request_serializer=tensorboard_service.DeleteTensorboardExperimentRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_tensorboard_experiment'] - - @property - def create_tensorboard_run(self) -> Callable[ - [tensorboard_service.CreateTensorboardRunRequest], - Awaitable[gca_tensorboard_run.TensorboardRun]]: - r"""Return a callable for the create tensorboard run method over gRPC. - - Creates a TensorboardRun. - - Returns: - Callable[[~.CreateTensorboardRunRequest], - Awaitable[~.TensorboardRun]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_tensorboard_run' not in self._stubs: - self._stubs['create_tensorboard_run'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardRun', - request_serializer=tensorboard_service.CreateTensorboardRunRequest.serialize, - response_deserializer=gca_tensorboard_run.TensorboardRun.deserialize, - ) - return self._stubs['create_tensorboard_run'] - - @property - def batch_create_tensorboard_runs(self) -> Callable[ - [tensorboard_service.BatchCreateTensorboardRunsRequest], - Awaitable[tensorboard_service.BatchCreateTensorboardRunsResponse]]: - r"""Return a callable for the batch create tensorboard runs method over gRPC. - - Batch create TensorboardRuns. - - Returns: - Callable[[~.BatchCreateTensorboardRunsRequest], - Awaitable[~.BatchCreateTensorboardRunsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_create_tensorboard_runs' not in self._stubs: - self._stubs['batch_create_tensorboard_runs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/BatchCreateTensorboardRuns', - request_serializer=tensorboard_service.BatchCreateTensorboardRunsRequest.serialize, - response_deserializer=tensorboard_service.BatchCreateTensorboardRunsResponse.deserialize, - ) - return self._stubs['batch_create_tensorboard_runs'] - - @property - def get_tensorboard_run(self) -> Callable[ - [tensorboard_service.GetTensorboardRunRequest], - Awaitable[tensorboard_run.TensorboardRun]]: - r"""Return a callable for the get tensorboard run method over gRPC. - - Gets a TensorboardRun. - - Returns: - Callable[[~.GetTensorboardRunRequest], - Awaitable[~.TensorboardRun]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_tensorboard_run' not in self._stubs: - self._stubs['get_tensorboard_run'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardRun', - request_serializer=tensorboard_service.GetTensorboardRunRequest.serialize, - response_deserializer=tensorboard_run.TensorboardRun.deserialize, - ) - return self._stubs['get_tensorboard_run'] - - @property - def update_tensorboard_run(self) -> Callable[ - [tensorboard_service.UpdateTensorboardRunRequest], - Awaitable[gca_tensorboard_run.TensorboardRun]]: - r"""Return a callable for the update tensorboard run method over gRPC. - - Updates a TensorboardRun. - - Returns: - Callable[[~.UpdateTensorboardRunRequest], - Awaitable[~.TensorboardRun]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_tensorboard_run' not in self._stubs: - self._stubs['update_tensorboard_run'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardRun', - request_serializer=tensorboard_service.UpdateTensorboardRunRequest.serialize, - response_deserializer=gca_tensorboard_run.TensorboardRun.deserialize, - ) - return self._stubs['update_tensorboard_run'] - - @property - def list_tensorboard_runs(self) -> Callable[ - [tensorboard_service.ListTensorboardRunsRequest], - Awaitable[tensorboard_service.ListTensorboardRunsResponse]]: - r"""Return a callable for the list tensorboard runs method over gRPC. - - Lists TensorboardRuns in a Location. 
- - Returns: - Callable[[~.ListTensorboardRunsRequest], - Awaitable[~.ListTensorboardRunsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_tensorboard_runs' not in self._stubs: - self._stubs['list_tensorboard_runs'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardRuns', - request_serializer=tensorboard_service.ListTensorboardRunsRequest.serialize, - response_deserializer=tensorboard_service.ListTensorboardRunsResponse.deserialize, - ) - return self._stubs['list_tensorboard_runs'] - - @property - def delete_tensorboard_run(self) -> Callable[ - [tensorboard_service.DeleteTensorboardRunRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete tensorboard run method over gRPC. - - Deletes a TensorboardRun. - - Returns: - Callable[[~.DeleteTensorboardRunRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_tensorboard_run' not in self._stubs: - self._stubs['delete_tensorboard_run'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardRun', - request_serializer=tensorboard_service.DeleteTensorboardRunRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_tensorboard_run'] - - @property - def batch_create_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.BatchCreateTensorboardTimeSeriesRequest], - Awaitable[tensorboard_service.BatchCreateTensorboardTimeSeriesResponse]]: - r"""Return a callable for the batch create tensorboard time - series method over gRPC. - - Batch create TensorboardTimeSeries that belong to a - TensorboardExperiment. - - Returns: - Callable[[~.BatchCreateTensorboardTimeSeriesRequest], - Awaitable[~.BatchCreateTensorboardTimeSeriesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'batch_create_tensorboard_time_series' not in self._stubs: - self._stubs['batch_create_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/BatchCreateTensorboardTimeSeries', - request_serializer=tensorboard_service.BatchCreateTensorboardTimeSeriesRequest.serialize, - response_deserializer=tensorboard_service.BatchCreateTensorboardTimeSeriesResponse.deserialize, - ) - return self._stubs['batch_create_tensorboard_time_series'] - - @property - def create_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.CreateTensorboardTimeSeriesRequest], - Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries]]: - r"""Return a callable for the create tensorboard time series method over gRPC. - - Creates a TensorboardTimeSeries. 
- - Returns: - Callable[[~.CreateTensorboardTimeSeriesRequest], - Awaitable[~.TensorboardTimeSeries]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_tensorboard_time_series' not in self._stubs: - self._stubs['create_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/CreateTensorboardTimeSeries', - request_serializer=tensorboard_service.CreateTensorboardTimeSeriesRequest.serialize, - response_deserializer=gca_tensorboard_time_series.TensorboardTimeSeries.deserialize, - ) - return self._stubs['create_tensorboard_time_series'] - - @property - def get_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.GetTensorboardTimeSeriesRequest], - Awaitable[tensorboard_time_series.TensorboardTimeSeries]]: - r"""Return a callable for the get tensorboard time series method over gRPC. - - Gets a TensorboardTimeSeries. - - Returns: - Callable[[~.GetTensorboardTimeSeriesRequest], - Awaitable[~.TensorboardTimeSeries]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_tensorboard_time_series' not in self._stubs: - self._stubs['get_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/GetTensorboardTimeSeries', - request_serializer=tensorboard_service.GetTensorboardTimeSeriesRequest.serialize, - response_deserializer=tensorboard_time_series.TensorboardTimeSeries.deserialize, - ) - return self._stubs['get_tensorboard_time_series'] - - @property - def update_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.UpdateTensorboardTimeSeriesRequest], - Awaitable[gca_tensorboard_time_series.TensorboardTimeSeries]]: - r"""Return a callable for the update tensorboard time series method over gRPC. - - Updates a TensorboardTimeSeries. - - Returns: - Callable[[~.UpdateTensorboardTimeSeriesRequest], - Awaitable[~.TensorboardTimeSeries]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_tensorboard_time_series' not in self._stubs: - self._stubs['update_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/UpdateTensorboardTimeSeries', - request_serializer=tensorboard_service.UpdateTensorboardTimeSeriesRequest.serialize, - response_deserializer=gca_tensorboard_time_series.TensorboardTimeSeries.deserialize, - ) - return self._stubs['update_tensorboard_time_series'] - - @property - def list_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.ListTensorboardTimeSeriesRequest], - Awaitable[tensorboard_service.ListTensorboardTimeSeriesResponse]]: - r"""Return a callable for the list tensorboard time series method over gRPC. - - Lists TensorboardTimeSeries in a Location. 
- - Returns: - Callable[[~.ListTensorboardTimeSeriesRequest], - Awaitable[~.ListTensorboardTimeSeriesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_tensorboard_time_series' not in self._stubs: - self._stubs['list_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ListTensorboardTimeSeries', - request_serializer=tensorboard_service.ListTensorboardTimeSeriesRequest.serialize, - response_deserializer=tensorboard_service.ListTensorboardTimeSeriesResponse.deserialize, - ) - return self._stubs['list_tensorboard_time_series'] - - @property - def delete_tensorboard_time_series(self) -> Callable[ - [tensorboard_service.DeleteTensorboardTimeSeriesRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the delete tensorboard time series method over gRPC. - - Deletes a TensorboardTimeSeries. - - Returns: - Callable[[~.DeleteTensorboardTimeSeriesRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_tensorboard_time_series' not in self._stubs: - self._stubs['delete_tensorboard_time_series'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/DeleteTensorboardTimeSeries', - request_serializer=tensorboard_service.DeleteTensorboardTimeSeriesRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['delete_tensorboard_time_series'] - - @property - def batch_read_tensorboard_time_series_data(self) -> Callable[ - [tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest], - Awaitable[tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse]]: - r"""Return a callable for the batch read tensorboard time - series data method over gRPC. - - Reads multiple TensorboardTimeSeries' data. The data - point number limit is 1000 for scalars, 100 for tensors - and blob references. If the number of data points stored - is less than the limit, all data will be returned. - Otherwise, that limit number of data points will be - randomly selected from this time series and returned. - - Returns: - Callable[[~.BatchReadTensorboardTimeSeriesDataRequest], - Awaitable[~.BatchReadTensorboardTimeSeriesDataResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'batch_read_tensorboard_time_series_data' not in self._stubs: - self._stubs['batch_read_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/BatchReadTensorboardTimeSeriesData', - request_serializer=tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest.serialize, - response_deserializer=tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse.deserialize, - ) - return self._stubs['batch_read_tensorboard_time_series_data'] - - @property - def read_tensorboard_time_series_data(self) -> Callable[ - [tensorboard_service.ReadTensorboardTimeSeriesDataRequest], - Awaitable[tensorboard_service.ReadTensorboardTimeSeriesDataResponse]]: - r"""Return a callable for the read tensorboard time series - data method over gRPC. - - Reads a TensorboardTimeSeries' data. By default, if the number - of data points stored is less than 1000, all data will be - returned. Otherwise, 1000 data points will be randomly selected - from this time series and returned. This value can be changed by - changing max_data_points, which can't be greater than 10k. - - Returns: - Callable[[~.ReadTensorboardTimeSeriesDataRequest], - Awaitable[~.ReadTensorboardTimeSeriesDataResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'read_tensorboard_time_series_data' not in self._stubs: - self._stubs['read_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ReadTensorboardTimeSeriesData', - request_serializer=tensorboard_service.ReadTensorboardTimeSeriesDataRequest.serialize, - response_deserializer=tensorboard_service.ReadTensorboardTimeSeriesDataResponse.deserialize, - ) - return self._stubs['read_tensorboard_time_series_data'] - - @property - def read_tensorboard_blob_data(self) -> Callable[ - [tensorboard_service.ReadTensorboardBlobDataRequest], - Awaitable[tensorboard_service.ReadTensorboardBlobDataResponse]]: - r"""Return a callable for the read tensorboard blob data method over gRPC. - - Gets bytes of TensorboardBlobs. - This is to allow reading blob data stored in consumer - project's Cloud Storage bucket without users having to - obtain Cloud Storage access permission. - - Returns: - Callable[[~.ReadTensorboardBlobDataRequest], - Awaitable[~.ReadTensorboardBlobDataResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'read_tensorboard_blob_data' not in self._stubs: - self._stubs['read_tensorboard_blob_data'] = self.grpc_channel.unary_stream( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ReadTensorboardBlobData', - request_serializer=tensorboard_service.ReadTensorboardBlobDataRequest.serialize, - response_deserializer=tensorboard_service.ReadTensorboardBlobDataResponse.deserialize, - ) - return self._stubs['read_tensorboard_blob_data'] - - @property - def write_tensorboard_experiment_data(self) -> Callable[ - [tensorboard_service.WriteTensorboardExperimentDataRequest], - Awaitable[tensorboard_service.WriteTensorboardExperimentDataResponse]]: - r"""Return a callable for the write tensorboard experiment - data method over gRPC. - - Write time series data points of multiple - TensorboardTimeSeries in multiple TensorboardRun's. If - any data fail to be ingested, an error will be returned. - - Returns: - Callable[[~.WriteTensorboardExperimentDataRequest], - Awaitable[~.WriteTensorboardExperimentDataResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'write_tensorboard_experiment_data' not in self._stubs: - self._stubs['write_tensorboard_experiment_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/WriteTensorboardExperimentData', - request_serializer=tensorboard_service.WriteTensorboardExperimentDataRequest.serialize, - response_deserializer=tensorboard_service.WriteTensorboardExperimentDataResponse.deserialize, - ) - return self._stubs['write_tensorboard_experiment_data'] - - @property - def write_tensorboard_run_data(self) -> Callable[ - [tensorboard_service.WriteTensorboardRunDataRequest], - Awaitable[tensorboard_service.WriteTensorboardRunDataResponse]]: - r"""Return a callable for the write tensorboard run data method over gRPC. - - Write time series data points into multiple - TensorboardTimeSeries under a TensorboardRun. If any - data fail to be ingested, an error will be returned. - - Returns: - Callable[[~.WriteTensorboardRunDataRequest], - Awaitable[~.WriteTensorboardRunDataResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'write_tensorboard_run_data' not in self._stubs: - self._stubs['write_tensorboard_run_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/WriteTensorboardRunData', - request_serializer=tensorboard_service.WriteTensorboardRunDataRequest.serialize, - response_deserializer=tensorboard_service.WriteTensorboardRunDataResponse.deserialize, - ) - return self._stubs['write_tensorboard_run_data'] - - @property - def export_tensorboard_time_series_data(self) -> Callable[ - [tensorboard_service.ExportTensorboardTimeSeriesDataRequest], - Awaitable[tensorboard_service.ExportTensorboardTimeSeriesDataResponse]]: - r"""Return a callable for the export tensorboard time series - data method over gRPC. - - Exports a TensorboardTimeSeries' data. Data is - returned in paginated responses. - - Returns: - Callable[[~.ExportTensorboardTimeSeriesDataRequest], - Awaitable[~.ExportTensorboardTimeSeriesDataResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'export_tensorboard_time_series_data' not in self._stubs: - self._stubs['export_tensorboard_time_series_data'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.TensorboardService/ExportTensorboardTimeSeriesData', - request_serializer=tensorboard_service.ExportTensorboardTimeSeriesDataRequest.serialize, - response_deserializer=tensorboard_service.ExportTensorboardTimeSeriesDataResponse.deserialize, - ) - return self._stubs['export_tensorboard_time_series_data'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'TensorboardServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/__init__.py deleted file mode 100644 index d629499098..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import VizierServiceClient -from .async_client import VizierServiceAsyncClient - -__all__ = ( - 'VizierServiceClient', - 'VizierServiceAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py deleted file mode 100644 index f5f75e86cf..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/async_client.py +++ /dev/null @@ -1,1292 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.vizier_service import pagers -from google.cloud.aiplatform_v1beta1.types import study -from google.cloud.aiplatform_v1beta1.types import study as gca_study -from google.cloud.aiplatform_v1beta1.types import vizier_service -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import VizierServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import VizierServiceGrpcAsyncIOTransport -from .client import VizierServiceClient - - -class VizierServiceAsyncClient: - """Vertex AI Vizier API. - Vertex AI Vizier is a service to solve blackbox optimization - problems, such as tuning machine learning hyperparameters and - searching over deep learning architectures. 
- """ - - _client: VizierServiceClient - - DEFAULT_ENDPOINT = VizierServiceClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = VizierServiceClient.DEFAULT_MTLS_ENDPOINT - - custom_job_path = staticmethod(VizierServiceClient.custom_job_path) - parse_custom_job_path = staticmethod(VizierServiceClient.parse_custom_job_path) - study_path = staticmethod(VizierServiceClient.study_path) - parse_study_path = staticmethod(VizierServiceClient.parse_study_path) - trial_path = staticmethod(VizierServiceClient.trial_path) - parse_trial_path = staticmethod(VizierServiceClient.parse_trial_path) - common_billing_account_path = staticmethod(VizierServiceClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(VizierServiceClient.parse_common_billing_account_path) - common_folder_path = staticmethod(VizierServiceClient.common_folder_path) - parse_common_folder_path = staticmethod(VizierServiceClient.parse_common_folder_path) - common_organization_path = staticmethod(VizierServiceClient.common_organization_path) - parse_common_organization_path = staticmethod(VizierServiceClient.parse_common_organization_path) - common_project_path = staticmethod(VizierServiceClient.common_project_path) - parse_common_project_path = staticmethod(VizierServiceClient.parse_common_project_path) - common_location_path = staticmethod(VizierServiceClient.common_location_path) - parse_common_location_path = staticmethod(VizierServiceClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - VizierServiceAsyncClient: The constructed client. 
- """ - return VizierServiceClient.from_service_account_info.__func__(VizierServiceAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - VizierServiceAsyncClient: The constructed client. - """ - return VizierServiceClient.from_service_account_file.__func__(VizierServiceAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> VizierServiceTransport: - """Returns the transport used by the client instance. - - Returns: - VizierServiceTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(VizierServiceClient).get_transport_class, type(VizierServiceClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, VizierServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the vizier service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.VizierServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. 
- (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = VizierServiceClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_study(self, - request: Union[vizier_service.CreateStudyRequest, dict] = None, - *, - parent: str = None, - study: gca_study.Study = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_study.Study: - r"""Creates a Study. A resource name will be generated - after creation of the Study. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateStudyRequest, dict]): - The request object. Request message for - [VizierService.CreateStudy][google.cloud.aiplatform.v1beta1.VizierService.CreateStudy]. - parent (:class:`str`): - Required. The resource name of the Location to create - the CustomJob in. 
Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - study (:class:`google.cloud.aiplatform_v1beta1.types.Study`): - Required. The Study configuration - used to create the Study. - - This corresponds to the ``study`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Study: - LINT.IfChange - A message representing a Study. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, study]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.CreateStudyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if study is not None: - request.study = study - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_study, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_study(self, - request: Union[vizier_service.GetStudyRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Study: - r"""Gets a Study by name. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetStudyRequest, dict]): - The request object. Request message for - [VizierService.GetStudy][google.cloud.aiplatform.v1beta1.VizierService.GetStudy]. - name (:class:`str`): - Required. The name of the Study resource. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Study: - LINT.IfChange - A message representing a Study. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.GetStudyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_study, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_studies(self, - request: Union[vizier_service.ListStudiesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListStudiesAsyncPager: - r"""Lists all the studies in a region for an associated - project. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListStudiesRequest, dict]): - The request object. Request message for - [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies]. - parent (:class:`str`): - Required. The resource name of the Location to list the - Study from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListStudiesAsyncPager: - Response message for - [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.ListStudiesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_studies, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListStudiesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_study(self, - request: Union[vizier_service.DeleteStudyRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes a Study. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteStudyRequest, dict]): - The request object. Request message for - [VizierService.DeleteStudy][google.cloud.aiplatform.v1beta1.VizierService.DeleteStudy]. - name (:class:`str`): - Required. The name of the Study resource to be deleted. 
- Format: - ``projects/{project}/locations/{location}/studies/{study}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.DeleteStudyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_study, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def lookup_study(self, - request: Union[vizier_service.LookupStudyRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Study: - r"""Looks a study up using the user-defined display_name field - instead of the fully qualified resource name. 
- - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.LookupStudyRequest, dict]): - The request object. Request message for - [VizierService.LookupStudy][google.cloud.aiplatform.v1beta1.VizierService.LookupStudy]. - parent (:class:`str`): - Required. The resource name of the Location to get the - Study from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Study: - LINT.IfChange - A message representing a Study. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.LookupStudyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.lookup_study, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def suggest_trials(self, - request: Union[vizier_service.SuggestTrialsRequest, dict] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Adds one or more Trials to a Study, with parameter values - suggested by Vertex AI Vizier. Returns a long-running operation - associated with the generation of Trial suggestions. When this - long-running operation succeeds, it will contain a - [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.SuggestTrialsRequest, dict]): - The request object. Request message for - [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.SuggestTrialsResponse` - Response message for - [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials]. - - """ - # Create or coerce a protobuf request object. - request = vizier_service.SuggestTrialsRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.suggest_trials, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - vizier_service.SuggestTrialsResponse, - metadata_type=vizier_service.SuggestTrialsMetadata, - ) - - # Done; return the response. - return response - - async def create_trial(self, - request: Union[vizier_service.CreateTrialRequest, dict] = None, - *, - parent: str = None, - trial: study.Trial = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Trial: - r"""Adds a user provided Trial to a Study. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateTrialRequest, dict]): - The request object. Request message for - [VizierService.CreateTrial][google.cloud.aiplatform.v1beta1.VizierService.CreateTrial]. - parent (:class:`str`): - Required. The resource name of the Study to create the - Trial in. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - trial (:class:`google.cloud.aiplatform_v1beta1.types.Trial`): - Required. The Trial to create. - This corresponds to the ``trial`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Trial: - A message representing a Trial. A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, trial]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.CreateTrialRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if trial is not None: - request.trial = trial - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_trial, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_trial(self, - request: Union[vizier_service.GetTrialRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Trial: - r"""Gets a Trial. 
- - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetTrialRequest, dict]): - The request object. Request message for - [VizierService.GetTrial][google.cloud.aiplatform.v1beta1.VizierService.GetTrial]. - name (:class:`str`): - Required. The name of the Trial resource. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Trial: - A message representing a Trial. A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.GetTrialRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_trial, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_trials(self, - request: Union[vizier_service.ListTrialsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTrialsAsyncPager: - r"""Lists the Trials associated with a Study. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListTrialsRequest, dict]): - The request object. Request message for - [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials]. - parent (:class:`str`): - Required. The resource name of the Study to list the - Trial from. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListTrialsAsyncPager: - Response message for - [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.ListTrialsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_trials, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListTrialsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def add_trial_measurement(self, - request: Union[vizier_service.AddTrialMeasurementRequest, dict] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Trial: - r"""Adds a measurement of the objective metrics to a - Trial. This measurement is assumed to have been taken - before the Trial is complete. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.AddTrialMeasurementRequest, dict]): - The request object. Request message for - [VizierService.AddTrialMeasurement][google.cloud.aiplatform.v1beta1.VizierService.AddTrialMeasurement]. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Trial: - A message representing a Trial. A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. - request = vizier_service.AddTrialMeasurementRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.add_trial_measurement, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("trial_name", request.trial_name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def complete_trial(self, - request: Union[vizier_service.CompleteTrialRequest, dict] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Trial: - r"""Marks a Trial as complete. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CompleteTrialRequest, dict]): - The request object. Request message for - [VizierService.CompleteTrial][google.cloud.aiplatform.v1beta1.VizierService.CompleteTrial]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Trial: - A message representing a Trial. A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. - request = vizier_service.CompleteTrialRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.complete_trial, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_trial(self, - request: Union[vizier_service.DeleteTrialRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes a Trial. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTrialRequest, dict]): - The request object. Request message for - [VizierService.DeleteTrial][google.cloud.aiplatform.v1beta1.VizierService.DeleteTrial]. - name (:class:`str`): - Required. The Trial's name. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.DeleteTrialRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_trial, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def check_trial_early_stopping_state(self, - request: Union[vizier_service.CheckTrialEarlyStoppingStateRequest, dict] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Checks whether a Trial should stop or not. Returns a - long-running operation. When the operation is successful, it - will contain a - [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CheckTrialEarlyStoppingStateRequest, dict]): - The request object. 
Request message for - [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.CheckTrialEarlyStoppingStateResponse` - Response message for - [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState]. - - """ - # Create or coerce a protobuf request object. - request = vizier_service.CheckTrialEarlyStoppingStateRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.check_trial_early_stopping_state, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("trial_name", request.trial_name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - vizier_service.CheckTrialEarlyStoppingStateResponse, - metadata_type=vizier_service.CheckTrialEarlyStoppingStateMetatdata, - ) - - # Done; return the response. 
- return response - - async def stop_trial(self, - request: Union[vizier_service.StopTrialRequest, dict] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Trial: - r"""Stops a Trial. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.StopTrialRequest, dict]): - The request object. Request message for - [VizierService.StopTrial][google.cloud.aiplatform.v1beta1.VizierService.StopTrial]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Trial: - A message representing a Trial. A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. - request = vizier_service.StopTrialRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.stop_trial, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def list_optimal_trials(self, - request: Union[vizier_service.ListOptimalTrialsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> vizier_service.ListOptimalTrialsResponse: - r"""Lists the pareto-optimal Trials for multi-objective Study or the - optimal Trials for single-objective Study. The definition of - pareto-optimal can be checked in wiki page. - https://en.wikipedia.org/wiki/Pareto_efficiency - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsRequest, dict]): - The request object. Request message for - [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials]. - parent (:class:`str`): - Required. The name of the Study that - the optimal Trial belongs to. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsResponse: - Response message for - [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = vizier_service.ListOptimalTrialsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_optimal_trials, - default_timeout=5.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "VizierServiceAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py deleted file mode 100644 index b6bcd7578f..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/client.py +++ /dev/null @@ -1,1513 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import os -import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union -import pkg_resources - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.aiplatform_v1beta1.services.vizier_service import pagers -from google.cloud.aiplatform_v1beta1.types import study -from google.cloud.aiplatform_v1beta1.types import study as gca_study -from google.cloud.aiplatform_v1beta1.types import vizier_service -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import VizierServiceTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import VizierServiceGrpcTransport -from .transports.grpc_asyncio import VizierServiceGrpcAsyncIOTransport - - -class VizierServiceClientMeta(type): - """Metaclass for the VizierService client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. 
- """ - _transport_registry = OrderedDict() # type: Dict[str, Type[VizierServiceTransport]] - _transport_registry["grpc"] = VizierServiceGrpcTransport - _transport_registry["grpc_asyncio"] = VizierServiceGrpcAsyncIOTransport - - def get_transport_class(cls, - label: str = None, - ) -> Type[VizierServiceTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class VizierServiceClient(metaclass=VizierServiceClientMeta): - """Vertex AI Vizier API. - Vertex AI Vizier is a service to solve blackbox optimization - problems, such as tuning machine learning hyperparameters and - searching over deep learning architectures. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
- ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "aiplatform.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - VizierServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - VizierServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> VizierServiceTransport: - """Returns the transport used by the client instance. - - Returns: - VizierServiceTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def custom_job_path(project: str,location: str,custom_job: str,) -> str: - """Returns a fully-qualified custom_job string.""" - return "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) - - @staticmethod - def parse_custom_job_path(path: str) -> Dict[str,str]: - """Parses a custom_job path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/customJobs/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def study_path(project: str,location: str,study: str,) -> str: - """Returns a fully-qualified study string.""" - return "projects/{project}/locations/{location}/studies/{study}".format(project=project, location=location, study=study, ) - - @staticmethod - def parse_study_path(path: str) -> Dict[str,str]: - """Parses a study path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/studies/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def trial_path(project: str,location: str,study: str,trial: str,) -> str: - """Returns a fully-qualified trial string.""" - return "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, ) - - @staticmethod - def parse_trial_path(path: str) -> Dict[str,str]: - """Parses a trial path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/studies/(?P.+?)/trials/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component 
segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, VizierServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - 
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the vizier service client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, VizierServiceTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. 
- """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, VizierServiceTransport): - # transport is a VizierServiceTransport instance. 
- if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." - ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - ) - - def create_study(self, - request: Union[vizier_service.CreateStudyRequest, dict] = None, - *, - parent: str = None, - study: gca_study.Study = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gca_study.Study: - r"""Creates a Study. A resource name will be generated - after creation of the Study. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateStudyRequest, dict]): - The request object. Request message for - [VizierService.CreateStudy][google.cloud.aiplatform.v1beta1.VizierService.CreateStudy]. - parent (str): - Required. The resource name of the Location to create - the CustomJob in. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - study (google.cloud.aiplatform_v1beta1.types.Study): - Required. The Study configuration - used to create the Study. - - This corresponds to the ``study`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Study: - LINT.IfChange - A message representing a Study. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, study]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.CreateStudyRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.CreateStudyRequest): - request = vizier_service.CreateStudyRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if study is not None: - request.study = study - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_study] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_study(self, - request: Union[vizier_service.GetStudyRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Study: - r"""Gets a Study by name. 
- - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetStudyRequest, dict]): - The request object. Request message for - [VizierService.GetStudy][google.cloud.aiplatform.v1beta1.VizierService.GetStudy]. - name (str): - Required. The name of the Study resource. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Study: - LINT.IfChange - A message representing a Study. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.GetStudyRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.GetStudyRequest): - request = vizier_service.GetStudyRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_study] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_studies(self, - request: Union[vizier_service.ListStudiesRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListStudiesPager: - r"""Lists all the studies in a region for an associated - project. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListStudiesRequest, dict]): - The request object. Request message for - [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies]. - parent (str): - Required. The resource name of the Location to list the - Study from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListStudiesPager: - Response message for - [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.ListStudiesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.ListStudiesRequest): - request = vizier_service.ListStudiesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_studies] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListStudiesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_study(self, - request: Union[vizier_service.DeleteStudyRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes a Study. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteStudyRequest, dict]): - The request object. Request message for - [VizierService.DeleteStudy][google.cloud.aiplatform.v1beta1.VizierService.DeleteStudy]. - name (str): - Required. 
The name of the Study resource to be deleted. - Format: - ``projects/{project}/locations/{location}/studies/{study}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.DeleteStudyRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.DeleteStudyRequest): - request = vizier_service.DeleteStudyRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_study] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def lookup_study(self, - request: Union[vizier_service.LookupStudyRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Study: - r"""Looks a study up using the user-defined display_name field - instead of the fully qualified resource name. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.LookupStudyRequest, dict]): - The request object. Request message for - [VizierService.LookupStudy][google.cloud.aiplatform.v1beta1.VizierService.LookupStudy]. - parent (str): - Required. The resource name of the Location to get the - Study from. Format: - ``projects/{project}/locations/{location}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Study: - LINT.IfChange - A message representing a Study. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.LookupStudyRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, vizier_service.LookupStudyRequest): - request = vizier_service.LookupStudyRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.lookup_study] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def suggest_trials(self, - request: Union[vizier_service.SuggestTrialsRequest, dict] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Adds one or more Trials to a Study, with parameter values - suggested by Vertex AI Vizier. Returns a long-running operation - associated with the generation of Trial suggestions. When this - long-running operation succeeds, it will contain a - [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.SuggestTrialsRequest, dict]): - The request object. Request message for - [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
- - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.SuggestTrialsResponse` - Response message for - [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials]. - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.SuggestTrialsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.SuggestTrialsRequest): - request = vizier_service.SuggestTrialsRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.suggest_trials] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - vizier_service.SuggestTrialsResponse, - metadata_type=vizier_service.SuggestTrialsMetadata, - ) - - # Done; return the response. - return response - - def create_trial(self, - request: Union[vizier_service.CreateTrialRequest, dict] = None, - *, - parent: str = None, - trial: study.Trial = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Trial: - r"""Adds a user provided Trial to a Study. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CreateTrialRequest, dict]): - The request object. Request message for - [VizierService.CreateTrial][google.cloud.aiplatform.v1beta1.VizierService.CreateTrial]. - parent (str): - Required. 
The resource name of the Study to create the - Trial in. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - trial (google.cloud.aiplatform_v1beta1.types.Trial): - Required. The Trial to create. - This corresponds to the ``trial`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Trial: - A message representing a Trial. A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, trial]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.CreateTrialRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.CreateTrialRequest): - request = vizier_service.CreateTrialRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if trial is not None: - request.trial = trial - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.create_trial] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_trial(self, - request: Union[vizier_service.GetTrialRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Trial: - r"""Gets a Trial. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.GetTrialRequest, dict]): - The request object. Request message for - [VizierService.GetTrial][google.cloud.aiplatform.v1beta1.VizierService.GetTrial]. - name (str): - Required. The name of the Trial resource. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Trial: - A message representing a Trial. A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.GetTrialRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.GetTrialRequest): - request = vizier_service.GetTrialRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_trial] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_trials(self, - request: Union[vizier_service.ListTrialsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTrialsPager: - r"""Lists the Trials associated with a Study. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListTrialsRequest, dict]): - The request object. Request message for - [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials]. - parent (str): - Required. The resource name of the Study to list the - Trial from. 
Format: - ``projects/{project}/locations/{location}/studies/{study}`` - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.services.vizier_service.pagers.ListTrialsPager: - Response message for - [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.ListTrialsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.ListTrialsRequest): - request = vizier_service.ListTrialsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_trials] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListTrialsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def add_trial_measurement(self, - request: Union[vizier_service.AddTrialMeasurementRequest, dict] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Trial: - r"""Adds a measurement of the objective metrics to a - Trial. This measurement is assumed to have been taken - before the Trial is complete. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.AddTrialMeasurementRequest, dict]): - The request object. Request message for - [VizierService.AddTrialMeasurement][google.cloud.aiplatform.v1beta1.VizierService.AddTrialMeasurement]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Trial: - A message representing a Trial. A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.AddTrialMeasurementRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, vizier_service.AddTrialMeasurementRequest): - request = vizier_service.AddTrialMeasurementRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.add_trial_measurement] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("trial_name", request.trial_name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def complete_trial(self, - request: Union[vizier_service.CompleteTrialRequest, dict] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Trial: - r"""Marks a Trial as complete. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CompleteTrialRequest, dict]): - The request object. Request message for - [VizierService.CompleteTrial][google.cloud.aiplatform.v1beta1.VizierService.CompleteTrial]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Trial: - A message representing a Trial. A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.CompleteTrialRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, vizier_service.CompleteTrialRequest): - request = vizier_service.CompleteTrialRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.complete_trial] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_trial(self, - request: Union[vizier_service.DeleteTrialRequest, dict] = None, - *, - name: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes a Trial. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.DeleteTrialRequest, dict]): - The request object. Request message for - [VizierService.DeleteTrial][google.cloud.aiplatform.v1beta1.VizierService.DeleteTrial]. - name (str): - Required. The Trial's name. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.DeleteTrialRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.DeleteTrialRequest): - request = vizier_service.DeleteTrialRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_trial] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def check_trial_early_stopping_state(self, - request: Union[vizier_service.CheckTrialEarlyStoppingStateRequest, dict] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Checks whether a Trial should stop or not. Returns a - long-running operation. When the operation is successful, it - will contain a - [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.CheckTrialEarlyStoppingStateRequest, dict]): - The request object. Request message for - [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState]. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.aiplatform_v1beta1.types.CheckTrialEarlyStoppingStateResponse` - Response message for - [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState]. - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.CheckTrialEarlyStoppingStateRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.CheckTrialEarlyStoppingStateRequest): - request = vizier_service.CheckTrialEarlyStoppingStateRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.check_trial_early_stopping_state] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("trial_name", request.trial_name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - vizier_service.CheckTrialEarlyStoppingStateResponse, - metadata_type=vizier_service.CheckTrialEarlyStoppingStateMetatdata, - ) - - # Done; return the response. 
- return response - - def stop_trial(self, - request: Union[vizier_service.StopTrialRequest, dict] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> study.Trial: - r"""Stops a Trial. - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.StopTrialRequest, dict]): - The request object. Request message for - [VizierService.StopTrial][google.cloud.aiplatform.v1beta1.VizierService.StopTrial]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.Trial: - A message representing a Trial. A - Trial contains a unique set of - Parameters that has been or will be - evaluated, along with the objective - metrics got by running the Trial. - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.StopTrialRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.StopTrialRequest): - request = vizier_service.StopTrialRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.stop_trial] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def list_optimal_trials(self, - request: Union[vizier_service.ListOptimalTrialsRequest, dict] = None, - *, - parent: str = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> vizier_service.ListOptimalTrialsResponse: - r"""Lists the pareto-optimal Trials for multi-objective Study or the - optimal Trials for single-objective Study. The definition of - pareto-optimal can be checked in wiki page. - https://en.wikipedia.org/wiki/Pareto_efficiency - - Args: - request (Union[google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsRequest, dict]): - The request object. Request message for - [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials]. - parent (str): - Required. The name of the Study that - the optimal Trial belongs to. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.aiplatform_v1beta1.types.ListOptimalTrialsResponse: - Response message for - [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials]. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a vizier_service.ListOptimalTrialsRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, vizier_service.ListOptimalTrialsRequest): - request = vizier_service.ListOptimalTrialsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_optimal_trials] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! 
- """ - self.transport.close() - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-aiplatform", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "VizierServiceClient", -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/pagers.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/pagers.py deleted file mode 100644 index 08b81423f2..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/pagers.py +++ /dev/null @@ -1,263 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.aiplatform_v1beta1.types import study -from google.cloud.aiplatform_v1beta1.types import vizier_service - - -class ListStudiesPager: - """A pager for iterating through ``list_studies`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListStudiesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``studies`` field. 
- - If there are more pages, the ``__iter__`` method will make additional - ``ListStudies`` requests and continue to iterate - through the ``studies`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListStudiesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., vizier_service.ListStudiesResponse], - request: vizier_service.ListStudiesRequest, - response: vizier_service.ListStudiesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListStudiesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListStudiesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = vizier_service.ListStudiesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[vizier_service.ListStudiesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[study.Study]: - for page in self.pages: - yield from page.studies - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListStudiesAsyncPager: - """A pager for iterating through ``list_studies`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListStudiesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``studies`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListStudies`` requests and continue to iterate - through the ``studies`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListStudiesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[vizier_service.ListStudiesResponse]], - request: vizier_service.ListStudiesRequest, - response: vizier_service.ListStudiesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListStudiesRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListStudiesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = vizier_service.ListStudiesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[vizier_service.ListStudiesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[study.Study]: - async def async_generator(): - async for page in self.pages: - for response in page.studies: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTrialsPager: - """A pager for iterating through ``list_trials`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListTrialsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``trials`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListTrials`` requests and continue to iterate - through the ``trials`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTrialsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., vizier_service.ListTrialsResponse], - request: vizier_service.ListTrialsRequest, - response: vizier_service.ListTrialsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. 
- request (google.cloud.aiplatform_v1beta1.types.ListTrialsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListTrialsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = vizier_service.ListTrialsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[vizier_service.ListTrialsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[study.Trial]: - for page in self.pages: - yield from page.trials - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTrialsAsyncPager: - """A pager for iterating through ``list_trials`` requests. - - This class thinly wraps an initial - :class:`google.cloud.aiplatform_v1beta1.types.ListTrialsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``trials`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListTrials`` requests and continue to iterate - through the ``trials`` field on the - corresponding responses. - - All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListTrialsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., Awaitable[vizier_service.ListTrialsResponse]], - request: vizier_service.ListTrialsRequest, - response: vizier_service.ListTrialsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.aiplatform_v1beta1.types.ListTrialsRequest): - The initial request object. - response (google.cloud.aiplatform_v1beta1.types.ListTrialsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = vizier_service.ListTrialsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[vizier_service.ListTrialsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterator[study.Trial]: - async def async_generator(): - async for page in self.pages: - for response in page.trials: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/__init__.py deleted file mode 100644 index afc70ea68e..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# 
Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import VizierServiceTransport -from .grpc import VizierServiceGrpcTransport -from .grpc_asyncio import VizierServiceGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[VizierServiceTransport]] -_transport_registry['grpc'] = VizierServiceGrpcTransport -_transport_registry['grpc_asyncio'] = VizierServiceGrpcAsyncIOTransport - -__all__ = ( - 'VizierServiceTransport', - 'VizierServiceGrpcTransport', - 'VizierServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py deleted file mode 100644 index fe8fd1b44b..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/base.py +++ /dev/null @@ -1,352 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.aiplatform_v1beta1.types import study -from google.cloud.aiplatform_v1beta1.types import study as gca_study -from google.cloud.aiplatform_v1beta1.types import vizier_service -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-cloud-aiplatform', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -class VizierServiceTransport(abc.ABC): - """Abstract transport class for VizierService.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'aiplatform.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - 
always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. 
- if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.create_study: gapic_v1.method.wrap_method( - self.create_study, - default_timeout=5.0, - client_info=client_info, - ), - self.get_study: gapic_v1.method.wrap_method( - self.get_study, - default_timeout=5.0, - client_info=client_info, - ), - self.list_studies: gapic_v1.method.wrap_method( - self.list_studies, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_study: gapic_v1.method.wrap_method( - self.delete_study, - default_timeout=5.0, - client_info=client_info, - ), - self.lookup_study: gapic_v1.method.wrap_method( - self.lookup_study, - default_timeout=5.0, - client_info=client_info, - ), - self.suggest_trials: gapic_v1.method.wrap_method( - self.suggest_trials, - default_timeout=5.0, - client_info=client_info, - ), - self.create_trial: gapic_v1.method.wrap_method( - self.create_trial, - default_timeout=5.0, - client_info=client_info, - ), - self.get_trial: gapic_v1.method.wrap_method( - self.get_trial, - default_timeout=5.0, - client_info=client_info, - ), - self.list_trials: gapic_v1.method.wrap_method( - 
self.list_trials, - default_timeout=5.0, - client_info=client_info, - ), - self.add_trial_measurement: gapic_v1.method.wrap_method( - self.add_trial_measurement, - default_timeout=5.0, - client_info=client_info, - ), - self.complete_trial: gapic_v1.method.wrap_method( - self.complete_trial, - default_timeout=5.0, - client_info=client_info, - ), - self.delete_trial: gapic_v1.method.wrap_method( - self.delete_trial, - default_timeout=5.0, - client_info=client_info, - ), - self.check_trial_early_stopping_state: gapic_v1.method.wrap_method( - self.check_trial_early_stopping_state, - default_timeout=5.0, - client_info=client_info, - ), - self.stop_trial: gapic_v1.method.wrap_method( - self.stop_trial, - default_timeout=5.0, - client_info=client_info, - ), - self.list_optimal_trials: gapic_v1.method.wrap_method( - self.list_optimal_trials, - default_timeout=5.0, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! 
- """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_study(self) -> Callable[ - [vizier_service.CreateStudyRequest], - Union[ - gca_study.Study, - Awaitable[gca_study.Study] - ]]: - raise NotImplementedError() - - @property - def get_study(self) -> Callable[ - [vizier_service.GetStudyRequest], - Union[ - study.Study, - Awaitable[study.Study] - ]]: - raise NotImplementedError() - - @property - def list_studies(self) -> Callable[ - [vizier_service.ListStudiesRequest], - Union[ - vizier_service.ListStudiesResponse, - Awaitable[vizier_service.ListStudiesResponse] - ]]: - raise NotImplementedError() - - @property - def delete_study(self) -> Callable[ - [vizier_service.DeleteStudyRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def lookup_study(self) -> Callable[ - [vizier_service.LookupStudyRequest], - Union[ - study.Study, - Awaitable[study.Study] - ]]: - raise NotImplementedError() - - @property - def suggest_trials(self) -> Callable[ - [vizier_service.SuggestTrialsRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def create_trial(self) -> Callable[ - [vizier_service.CreateTrialRequest], - Union[ - study.Trial, - Awaitable[study.Trial] - ]]: - raise NotImplementedError() - - @property - def get_trial(self) -> Callable[ - [vizier_service.GetTrialRequest], - Union[ - study.Trial, - Awaitable[study.Trial] - ]]: - raise NotImplementedError() - - @property - def list_trials(self) -> Callable[ - [vizier_service.ListTrialsRequest], - Union[ - vizier_service.ListTrialsResponse, - Awaitable[vizier_service.ListTrialsResponse] - ]]: - raise NotImplementedError() - - @property - def add_trial_measurement(self) -> Callable[ - [vizier_service.AddTrialMeasurementRequest], - 
Union[ - study.Trial, - Awaitable[study.Trial] - ]]: - raise NotImplementedError() - - @property - def complete_trial(self) -> Callable[ - [vizier_service.CompleteTrialRequest], - Union[ - study.Trial, - Awaitable[study.Trial] - ]]: - raise NotImplementedError() - - @property - def delete_trial(self) -> Callable[ - [vizier_service.DeleteTrialRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def check_trial_early_stopping_state(self) -> Callable[ - [vizier_service.CheckTrialEarlyStoppingStateRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def stop_trial(self) -> Callable[ - [vizier_service.StopTrialRequest], - Union[ - study.Trial, - Awaitable[study.Trial] - ]]: - raise NotImplementedError() - - @property - def list_optimal_trials(self) -> Callable[ - [vizier_service.ListOptimalTrialsRequest], - Union[ - vizier_service.ListOptimalTrialsResponse, - Awaitable[vizier_service.ListOptimalTrialsResponse] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'VizierServiceTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py deleted file mode 100644 index 2ea03a5a34..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc.py +++ /dev/null @@ -1,659 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.aiplatform_v1beta1.types import study -from google.cloud.aiplatform_v1beta1.types import study as gca_study -from google.cloud.aiplatform_v1beta1.types import vizier_service -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import VizierServiceTransport, DEFAULT_CLIENT_INFO - - -class VizierServiceGrpcTransport(VizierServiceTransport): - """gRPC backend transport for VizierService. - - Vertex AI Vizier API. - Vertex AI Vizier is a service to solve blackbox optimization - problems, such as tuning machine learning hyperparameters and - searching over deep learning architectures. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. 
- ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_study(self) -> Callable[ - [vizier_service.CreateStudyRequest], - gca_study.Study]: - r"""Return a callable for the create study method over gRPC. - - Creates a Study. A resource name will be generated - after creation of the Study. 
- - Returns: - Callable[[~.CreateStudyRequest], - ~.Study]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_study' not in self._stubs: - self._stubs['create_study'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/CreateStudy', - request_serializer=vizier_service.CreateStudyRequest.serialize, - response_deserializer=gca_study.Study.deserialize, - ) - return self._stubs['create_study'] - - @property - def get_study(self) -> Callable[ - [vizier_service.GetStudyRequest], - study.Study]: - r"""Return a callable for the get study method over gRPC. - - Gets a Study by name. - - Returns: - Callable[[~.GetStudyRequest], - ~.Study]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_study' not in self._stubs: - self._stubs['get_study'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/GetStudy', - request_serializer=vizier_service.GetStudyRequest.serialize, - response_deserializer=study.Study.deserialize, - ) - return self._stubs['get_study'] - - @property - def list_studies(self) -> Callable[ - [vizier_service.ListStudiesRequest], - vizier_service.ListStudiesResponse]: - r"""Return a callable for the list studies method over gRPC. - - Lists all the studies in a region for an associated - project. - - Returns: - Callable[[~.ListStudiesRequest], - ~.ListStudiesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_studies' not in self._stubs: - self._stubs['list_studies'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/ListStudies', - request_serializer=vizier_service.ListStudiesRequest.serialize, - response_deserializer=vizier_service.ListStudiesResponse.deserialize, - ) - return self._stubs['list_studies'] - - @property - def delete_study(self) -> Callable[ - [vizier_service.DeleteStudyRequest], - empty_pb2.Empty]: - r"""Return a callable for the delete study method over gRPC. - - Deletes a Study. - - Returns: - Callable[[~.DeleteStudyRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_study' not in self._stubs: - self._stubs['delete_study'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/DeleteStudy', - request_serializer=vizier_service.DeleteStudyRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_study'] - - @property - def lookup_study(self) -> Callable[ - [vizier_service.LookupStudyRequest], - study.Study]: - r"""Return a callable for the lookup study method over gRPC. - - Looks a study up using the user-defined display_name field - instead of the fully qualified resource name. - - Returns: - Callable[[~.LookupStudyRequest], - ~.Study]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'lookup_study' not in self._stubs: - self._stubs['lookup_study'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/LookupStudy', - request_serializer=vizier_service.LookupStudyRequest.serialize, - response_deserializer=study.Study.deserialize, - ) - return self._stubs['lookup_study'] - - @property - def suggest_trials(self) -> Callable[ - [vizier_service.SuggestTrialsRequest], - operations_pb2.Operation]: - r"""Return a callable for the suggest trials method over gRPC. - - Adds one or more Trials to a Study, with parameter values - suggested by Vertex AI Vizier. Returns a long-running operation - associated with the generation of Trial suggestions. When this - long-running operation succeeds, it will contain a - [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. - - Returns: - Callable[[~.SuggestTrialsRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'suggest_trials' not in self._stubs: - self._stubs['suggest_trials'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/SuggestTrials', - request_serializer=vizier_service.SuggestTrialsRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['suggest_trials'] - - @property - def create_trial(self) -> Callable[ - [vizier_service.CreateTrialRequest], - study.Trial]: - r"""Return a callable for the create trial method over gRPC. - - Adds a user provided Trial to a Study. - - Returns: - Callable[[~.CreateTrialRequest], - ~.Trial]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_trial' not in self._stubs: - self._stubs['create_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/CreateTrial', - request_serializer=vizier_service.CreateTrialRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['create_trial'] - - @property - def get_trial(self) -> Callable[ - [vizier_service.GetTrialRequest], - study.Trial]: - r"""Return a callable for the get trial method over gRPC. - - Gets a Trial. - - Returns: - Callable[[~.GetTrialRequest], - ~.Trial]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_trial' not in self._stubs: - self._stubs['get_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/GetTrial', - request_serializer=vizier_service.GetTrialRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['get_trial'] - - @property - def list_trials(self) -> Callable[ - [vizier_service.ListTrialsRequest], - vizier_service.ListTrialsResponse]: - r"""Return a callable for the list trials method over gRPC. - - Lists the Trials associated with a Study. - - Returns: - Callable[[~.ListTrialsRequest], - ~.ListTrialsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_trials' not in self._stubs: - self._stubs['list_trials'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/ListTrials', - request_serializer=vizier_service.ListTrialsRequest.serialize, - response_deserializer=vizier_service.ListTrialsResponse.deserialize, - ) - return self._stubs['list_trials'] - - @property - def add_trial_measurement(self) -> Callable[ - [vizier_service.AddTrialMeasurementRequest], - study.Trial]: - r"""Return a callable for the add trial measurement method over gRPC. - - Adds a measurement of the objective metrics to a - Trial. This measurement is assumed to have been taken - before the Trial is complete. - - Returns: - Callable[[~.AddTrialMeasurementRequest], - ~.Trial]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'add_trial_measurement' not in self._stubs: - self._stubs['add_trial_measurement'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/AddTrialMeasurement', - request_serializer=vizier_service.AddTrialMeasurementRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['add_trial_measurement'] - - @property - def complete_trial(self) -> Callable[ - [vizier_service.CompleteTrialRequest], - study.Trial]: - r"""Return a callable for the complete trial method over gRPC. - - Marks a Trial as complete. - - Returns: - Callable[[~.CompleteTrialRequest], - ~.Trial]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'complete_trial' not in self._stubs: - self._stubs['complete_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/CompleteTrial', - request_serializer=vizier_service.CompleteTrialRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['complete_trial'] - - @property - def delete_trial(self) -> Callable[ - [vizier_service.DeleteTrialRequest], - empty_pb2.Empty]: - r"""Return a callable for the delete trial method over gRPC. - - Deletes a Trial. - - Returns: - Callable[[~.DeleteTrialRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_trial' not in self._stubs: - self._stubs['delete_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/DeleteTrial', - request_serializer=vizier_service.DeleteTrialRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_trial'] - - @property - def check_trial_early_stopping_state(self) -> Callable[ - [vizier_service.CheckTrialEarlyStoppingStateRequest], - operations_pb2.Operation]: - r"""Return a callable for the check trial early stopping - state method over gRPC. - - Checks whether a Trial should stop or not. Returns a - long-running operation. When the operation is successful, it - will contain a - [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. - - Returns: - Callable[[~.CheckTrialEarlyStoppingStateRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'check_trial_early_stopping_state' not in self._stubs: - self._stubs['check_trial_early_stopping_state'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/CheckTrialEarlyStoppingState', - request_serializer=vizier_service.CheckTrialEarlyStoppingStateRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['check_trial_early_stopping_state'] - - @property - def stop_trial(self) -> Callable[ - [vizier_service.StopTrialRequest], - study.Trial]: - r"""Return a callable for the stop trial method over gRPC. - - Stops a Trial. - - Returns: - Callable[[~.StopTrialRequest], - ~.Trial]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'stop_trial' not in self._stubs: - self._stubs['stop_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/StopTrial', - request_serializer=vizier_service.StopTrialRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['stop_trial'] - - @property - def list_optimal_trials(self) -> Callable[ - [vizier_service.ListOptimalTrialsRequest], - vizier_service.ListOptimalTrialsResponse]: - r"""Return a callable for the list optimal trials method over gRPC. - - Lists the pareto-optimal Trials for multi-objective Study or the - optimal Trials for single-objective Study. The definition of - pareto-optimal can be checked in wiki page. - https://en.wikipedia.org/wiki/Pareto_efficiency - - Returns: - Callable[[~.ListOptimalTrialsRequest], - ~.ListOptimalTrialsResponse]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_optimal_trials' not in self._stubs: - self._stubs['list_optimal_trials'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/ListOptimalTrials', - request_serializer=vizier_service.ListOptimalTrialsRequest.serialize, - response_deserializer=vizier_service.ListOptimalTrialsResponse.deserialize, - ) - return self._stubs['list_optimal_trials'] - - def close(self): - self.grpc_channel.close() - -__all__ = ( - 'VizierServiceGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py deleted file mode 100644 index 0e3928f183..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/grpc_asyncio.py +++ /dev/null @@ -1,663 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.aiplatform_v1beta1.types import study -from google.cloud.aiplatform_v1beta1.types import study as gca_study -from google.cloud.aiplatform_v1beta1.types import vizier_service -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import VizierServiceTransport, DEFAULT_CLIENT_INFO -from .grpc import VizierServiceGrpcTransport - - -class VizierServiceGrpcAsyncIOTransport(VizierServiceTransport): - """gRPC AsyncIO backend transport for VizierService. - - Vertex AI Vizier API. - Vertex AI Vizier is a service to solve blackbox optimization - problems, such as tuning machine learning hyperparameters and - searching over deep learning architectures. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. 
- credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'aiplatform.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. 
- always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Sanity check: Only create a new client if we do not already have one. 
- if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_study(self) -> Callable[ - [vizier_service.CreateStudyRequest], - Awaitable[gca_study.Study]]: - r"""Return a callable for the create study method over gRPC. - - Creates a Study. A resource name will be generated - after creation of the Study. - - Returns: - Callable[[~.CreateStudyRequest], - Awaitable[~.Study]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_study' not in self._stubs: - self._stubs['create_study'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/CreateStudy', - request_serializer=vizier_service.CreateStudyRequest.serialize, - response_deserializer=gca_study.Study.deserialize, - ) - return self._stubs['create_study'] - - @property - def get_study(self) -> Callable[ - [vizier_service.GetStudyRequest], - Awaitable[study.Study]]: - r"""Return a callable for the get study method over gRPC. - - Gets a Study by name. - - Returns: - Callable[[~.GetStudyRequest], - Awaitable[~.Study]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_study' not in self._stubs: - self._stubs['get_study'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/GetStudy', - request_serializer=vizier_service.GetStudyRequest.serialize, - response_deserializer=study.Study.deserialize, - ) - return self._stubs['get_study'] - - @property - def list_studies(self) -> Callable[ - [vizier_service.ListStudiesRequest], - Awaitable[vizier_service.ListStudiesResponse]]: - r"""Return a callable for the list studies method over gRPC. - - Lists all the studies in a region for an associated - project. - - Returns: - Callable[[~.ListStudiesRequest], - Awaitable[~.ListStudiesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_studies' not in self._stubs: - self._stubs['list_studies'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/ListStudies', - request_serializer=vizier_service.ListStudiesRequest.serialize, - response_deserializer=vizier_service.ListStudiesResponse.deserialize, - ) - return self._stubs['list_studies'] - - @property - def delete_study(self) -> Callable[ - [vizier_service.DeleteStudyRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the delete study method over gRPC. - - Deletes a Study. - - Returns: - Callable[[~.DeleteStudyRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_study' not in self._stubs: - self._stubs['delete_study'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/DeleteStudy', - request_serializer=vizier_service.DeleteStudyRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_study'] - - @property - def lookup_study(self) -> Callable[ - [vizier_service.LookupStudyRequest], - Awaitable[study.Study]]: - r"""Return a callable for the lookup study method over gRPC. - - Looks a study up using the user-defined display_name field - instead of the fully qualified resource name. - - Returns: - Callable[[~.LookupStudyRequest], - Awaitable[~.Study]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'lookup_study' not in self._stubs: - self._stubs['lookup_study'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/LookupStudy', - request_serializer=vizier_service.LookupStudyRequest.serialize, - response_deserializer=study.Study.deserialize, - ) - return self._stubs['lookup_study'] - - @property - def suggest_trials(self) -> Callable[ - [vizier_service.SuggestTrialsRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the suggest trials method over gRPC. - - Adds one or more Trials to a Study, with parameter values - suggested by Vertex AI Vizier. Returns a long-running operation - associated with the generation of Trial suggestions. When this - long-running operation succeeds, it will contain a - [SuggestTrialsResponse][google.cloud.ml.v1.SuggestTrialsResponse]. - - Returns: - Callable[[~.SuggestTrialsRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'suggest_trials' not in self._stubs: - self._stubs['suggest_trials'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/SuggestTrials', - request_serializer=vizier_service.SuggestTrialsRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['suggest_trials'] - - @property - def create_trial(self) -> Callable[ - [vizier_service.CreateTrialRequest], - Awaitable[study.Trial]]: - r"""Return a callable for the create trial method over gRPC. - - Adds a user provided Trial to a Study. - - Returns: - Callable[[~.CreateTrialRequest], - Awaitable[~.Trial]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_trial' not in self._stubs: - self._stubs['create_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/CreateTrial', - request_serializer=vizier_service.CreateTrialRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['create_trial'] - - @property - def get_trial(self) -> Callable[ - [vizier_service.GetTrialRequest], - Awaitable[study.Trial]]: - r"""Return a callable for the get trial method over gRPC. - - Gets a Trial. - - Returns: - Callable[[~.GetTrialRequest], - Awaitable[~.Trial]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_trial' not in self._stubs: - self._stubs['get_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/GetTrial', - request_serializer=vizier_service.GetTrialRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['get_trial'] - - @property - def list_trials(self) -> Callable[ - [vizier_service.ListTrialsRequest], - Awaitable[vizier_service.ListTrialsResponse]]: - r"""Return a callable for the list trials method over gRPC. - - Lists the Trials associated with a Study. - - Returns: - Callable[[~.ListTrialsRequest], - Awaitable[~.ListTrialsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_trials' not in self._stubs: - self._stubs['list_trials'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/ListTrials', - request_serializer=vizier_service.ListTrialsRequest.serialize, - response_deserializer=vizier_service.ListTrialsResponse.deserialize, - ) - return self._stubs['list_trials'] - - @property - def add_trial_measurement(self) -> Callable[ - [vizier_service.AddTrialMeasurementRequest], - Awaitable[study.Trial]]: - r"""Return a callable for the add trial measurement method over gRPC. - - Adds a measurement of the objective metrics to a - Trial. This measurement is assumed to have been taken - before the Trial is complete. - - Returns: - Callable[[~.AddTrialMeasurementRequest], - Awaitable[~.Trial]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'add_trial_measurement' not in self._stubs: - self._stubs['add_trial_measurement'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/AddTrialMeasurement', - request_serializer=vizier_service.AddTrialMeasurementRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['add_trial_measurement'] - - @property - def complete_trial(self) -> Callable[ - [vizier_service.CompleteTrialRequest], - Awaitable[study.Trial]]: - r"""Return a callable for the complete trial method over gRPC. - - Marks a Trial as complete. - - Returns: - Callable[[~.CompleteTrialRequest], - Awaitable[~.Trial]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'complete_trial' not in self._stubs: - self._stubs['complete_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/CompleteTrial', - request_serializer=vizier_service.CompleteTrialRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['complete_trial'] - - @property - def delete_trial(self) -> Callable[ - [vizier_service.DeleteTrialRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the delete trial method over gRPC. - - Deletes a Trial. - - Returns: - Callable[[~.DeleteTrialRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_trial' not in self._stubs: - self._stubs['delete_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/DeleteTrial', - request_serializer=vizier_service.DeleteTrialRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_trial'] - - @property - def check_trial_early_stopping_state(self) -> Callable[ - [vizier_service.CheckTrialEarlyStoppingStateRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the check trial early stopping - state method over gRPC. - - Checks whether a Trial should stop or not. Returns a - long-running operation. When the operation is successful, it - will contain a - [CheckTrialEarlyStoppingStateResponse][google.cloud.ml.v1.CheckTrialEarlyStoppingStateResponse]. - - Returns: - Callable[[~.CheckTrialEarlyStoppingStateRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'check_trial_early_stopping_state' not in self._stubs: - self._stubs['check_trial_early_stopping_state'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/CheckTrialEarlyStoppingState', - request_serializer=vizier_service.CheckTrialEarlyStoppingStateRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['check_trial_early_stopping_state'] - - @property - def stop_trial(self) -> Callable[ - [vizier_service.StopTrialRequest], - Awaitable[study.Trial]]: - r"""Return a callable for the stop trial method over gRPC. - - Stops a Trial. - - Returns: - Callable[[~.StopTrialRequest], - Awaitable[~.Trial]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'stop_trial' not in self._stubs: - self._stubs['stop_trial'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/StopTrial', - request_serializer=vizier_service.StopTrialRequest.serialize, - response_deserializer=study.Trial.deserialize, - ) - return self._stubs['stop_trial'] - - @property - def list_optimal_trials(self) -> Callable[ - [vizier_service.ListOptimalTrialsRequest], - Awaitable[vizier_service.ListOptimalTrialsResponse]]: - r"""Return a callable for the list optimal trials method over gRPC. - - Lists the pareto-optimal Trials for multi-objective Study or the - optimal Trials for single-objective Study. The definition of - pareto-optimal can be checked in wiki page. - https://en.wikipedia.org/wiki/Pareto_efficiency - - Returns: - Callable[[~.ListOptimalTrialsRequest], - Awaitable[~.ListOptimalTrialsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_optimal_trials' not in self._stubs: - self._stubs['list_optimal_trials'] = self.grpc_channel.unary_unary( - '/google.cloud.aiplatform.v1beta1.VizierService/ListOptimalTrials', - request_serializer=vizier_service.ListOptimalTrialsRequest.serialize, - response_deserializer=vizier_service.ListOptimalTrialsResponse.deserialize, - ) - return self._stubs['list_optimal_trials'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'VizierServiceGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/__init__.py deleted file mode 100644 index 6de4908319..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/__init__.py +++ /dev/null @@ -1,1005 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .annotation import ( - Annotation, -) -from .annotation_spec import ( - AnnotationSpec, -) -from .artifact import ( - Artifact, -) -from .batch_prediction_job import ( - BatchPredictionJob, -) -from .completion_stats import ( - CompletionStats, -) -from .context import ( - Context, -) -from .custom_job import ( - ContainerSpec, - CustomJob, - CustomJobSpec, - PythonPackageSpec, - Scheduling, - WorkerPoolSpec, -) -from .data_item import ( - DataItem, -) -from .data_labeling_job import ( - ActiveLearningConfig, - DataLabelingJob, - SampleConfig, - TrainingConfig, -) -from .dataset import ( - Dataset, - ExportDataConfig, - ImportDataConfig, -) -from .dataset_service import ( - CreateDatasetOperationMetadata, - CreateDatasetRequest, - DeleteDatasetRequest, - ExportDataOperationMetadata, - ExportDataRequest, - ExportDataResponse, - GetAnnotationSpecRequest, - GetDatasetRequest, - ImportDataOperationMetadata, - ImportDataRequest, - ImportDataResponse, - ListAnnotationsRequest, - ListAnnotationsResponse, - ListDataItemsRequest, - ListDataItemsResponse, - ListDatasetsRequest, - ListDatasetsResponse, - UpdateDatasetRequest, -) -from .deployed_index_ref import ( - DeployedIndexRef, -) -from .deployed_model_ref import ( - DeployedModelRef, -) -from .encryption_spec import ( - EncryptionSpec, -) -from .endpoint import ( - DeployedModel, - Endpoint, - PrivateEndpoints, -) -from .endpoint_service import ( - CreateEndpointOperationMetadata, - CreateEndpointRequest, - DeleteEndpointRequest, - DeployModelOperationMetadata, - DeployModelRequest, - DeployModelResponse, - GetEndpointRequest, - ListEndpointsRequest, - ListEndpointsResponse, - UndeployModelOperationMetadata, - UndeployModelRequest, - UndeployModelResponse, - UpdateEndpointRequest, -) -from .entity_type import ( - EntityType, -) -from .env_var import ( - EnvVar, -) -from .event import ( - Event, -) -from .execution import ( - Execution, -) -from .explanation import ( - Attribution, - BlurBaselineConfig, - 
Explanation, - ExplanationMetadataOverride, - ExplanationParameters, - ExplanationSpec, - ExplanationSpecOverride, - FeatureNoiseSigma, - IntegratedGradientsAttribution, - ModelExplanation, - SampledShapleyAttribution, - Similarity, - SmoothGradConfig, - XraiAttribution, -) -from .explanation_metadata import ( - ExplanationMetadata, -) -from .feature import ( - Feature, -) -from .feature_monitoring_stats import ( - FeatureStatsAnomaly, -) -from .feature_selector import ( - FeatureSelector, - IdMatcher, -) -from .featurestore import ( - Featurestore, -) -from .featurestore_monitoring import ( - FeaturestoreMonitoringConfig, -) -from .featurestore_online_service import ( - FeatureValue, - FeatureValueList, - ReadFeatureValuesRequest, - ReadFeatureValuesResponse, - StreamingReadFeatureValuesRequest, -) -from .featurestore_service import ( - BatchCreateFeaturesOperationMetadata, - BatchCreateFeaturesRequest, - BatchCreateFeaturesResponse, - BatchReadFeatureValuesOperationMetadata, - BatchReadFeatureValuesRequest, - BatchReadFeatureValuesResponse, - CreateEntityTypeOperationMetadata, - CreateEntityTypeRequest, - CreateFeatureOperationMetadata, - CreateFeatureRequest, - CreateFeaturestoreOperationMetadata, - CreateFeaturestoreRequest, - DeleteEntityTypeRequest, - DeleteFeatureRequest, - DeleteFeaturestoreRequest, - DestinationFeatureSetting, - ExportFeatureValuesOperationMetadata, - ExportFeatureValuesRequest, - ExportFeatureValuesResponse, - FeatureValueDestination, - GetEntityTypeRequest, - GetFeatureRequest, - GetFeaturestoreRequest, - ImportFeatureValuesOperationMetadata, - ImportFeatureValuesRequest, - ImportFeatureValuesResponse, - ListEntityTypesRequest, - ListEntityTypesResponse, - ListFeaturesRequest, - ListFeaturesResponse, - ListFeaturestoresRequest, - ListFeaturestoresResponse, - SearchFeaturesRequest, - SearchFeaturesResponse, - UpdateEntityTypeRequest, - UpdateFeatureRequest, - UpdateFeaturestoreOperationMetadata, - UpdateFeaturestoreRequest, -) -from 
.hyperparameter_tuning_job import ( - HyperparameterTuningJob, -) -from .index import ( - Index, -) -from .index_endpoint import ( - DeployedIndex, - DeployedIndexAuthConfig, - IndexEndpoint, - IndexPrivateEndpoints, -) -from .index_endpoint_service import ( - CreateIndexEndpointOperationMetadata, - CreateIndexEndpointRequest, - DeleteIndexEndpointRequest, - DeployIndexOperationMetadata, - DeployIndexRequest, - DeployIndexResponse, - GetIndexEndpointRequest, - ListIndexEndpointsRequest, - ListIndexEndpointsResponse, - MutateDeployedIndexOperationMetadata, - MutateDeployedIndexRequest, - MutateDeployedIndexResponse, - UndeployIndexOperationMetadata, - UndeployIndexRequest, - UndeployIndexResponse, - UpdateIndexEndpointRequest, -) -from .index_service import ( - CreateIndexOperationMetadata, - CreateIndexRequest, - DeleteIndexRequest, - GetIndexRequest, - ListIndexesRequest, - ListIndexesResponse, - NearestNeighborSearchOperationMetadata, - UpdateIndexOperationMetadata, - UpdateIndexRequest, -) -from .io import ( - AvroSource, - BigQueryDestination, - BigQuerySource, - ContainerRegistryDestination, - CsvDestination, - CsvSource, - GcsDestination, - GcsSource, - TFRecordDestination, -) -from .job_service import ( - CancelBatchPredictionJobRequest, - CancelCustomJobRequest, - CancelDataLabelingJobRequest, - CancelHyperparameterTuningJobRequest, - CreateBatchPredictionJobRequest, - CreateCustomJobRequest, - CreateDataLabelingJobRequest, - CreateHyperparameterTuningJobRequest, - CreateModelDeploymentMonitoringJobRequest, - DeleteBatchPredictionJobRequest, - DeleteCustomJobRequest, - DeleteDataLabelingJobRequest, - DeleteHyperparameterTuningJobRequest, - DeleteModelDeploymentMonitoringJobRequest, - GetBatchPredictionJobRequest, - GetCustomJobRequest, - GetDataLabelingJobRequest, - GetHyperparameterTuningJobRequest, - GetModelDeploymentMonitoringJobRequest, - ListBatchPredictionJobsRequest, - ListBatchPredictionJobsResponse, - ListCustomJobsRequest, - 
ListCustomJobsResponse, - ListDataLabelingJobsRequest, - ListDataLabelingJobsResponse, - ListHyperparameterTuningJobsRequest, - ListHyperparameterTuningJobsResponse, - ListModelDeploymentMonitoringJobsRequest, - ListModelDeploymentMonitoringJobsResponse, - PauseModelDeploymentMonitoringJobRequest, - ResumeModelDeploymentMonitoringJobRequest, - SearchModelDeploymentMonitoringStatsAnomaliesRequest, - SearchModelDeploymentMonitoringStatsAnomaliesResponse, - UpdateModelDeploymentMonitoringJobOperationMetadata, - UpdateModelDeploymentMonitoringJobRequest, -) -from .lineage_subgraph import ( - LineageSubgraph, -) -from .machine_resources import ( - AutomaticResources, - AutoscalingMetricSpec, - BatchDedicatedResources, - DedicatedResources, - DiskSpec, - MachineSpec, - ResourcesConsumed, -) -from .manual_batch_tuning_parameters import ( - ManualBatchTuningParameters, -) -from .metadata_schema import ( - MetadataSchema, -) -from .metadata_service import ( - AddContextArtifactsAndExecutionsRequest, - AddContextArtifactsAndExecutionsResponse, - AddContextChildrenRequest, - AddContextChildrenResponse, - AddExecutionEventsRequest, - AddExecutionEventsResponse, - CreateArtifactRequest, - CreateContextRequest, - CreateExecutionRequest, - CreateMetadataSchemaRequest, - CreateMetadataStoreOperationMetadata, - CreateMetadataStoreRequest, - DeleteArtifactRequest, - DeleteContextRequest, - DeleteExecutionRequest, - DeleteMetadataStoreOperationMetadata, - DeleteMetadataStoreRequest, - GetArtifactRequest, - GetContextRequest, - GetExecutionRequest, - GetMetadataSchemaRequest, - GetMetadataStoreRequest, - ListArtifactsRequest, - ListArtifactsResponse, - ListContextsRequest, - ListContextsResponse, - ListExecutionsRequest, - ListExecutionsResponse, - ListMetadataSchemasRequest, - ListMetadataSchemasResponse, - ListMetadataStoresRequest, - ListMetadataStoresResponse, - PurgeArtifactsMetadata, - PurgeArtifactsRequest, - PurgeArtifactsResponse, - PurgeContextsMetadata, - 
PurgeContextsRequest, - PurgeContextsResponse, - PurgeExecutionsMetadata, - PurgeExecutionsRequest, - PurgeExecutionsResponse, - QueryArtifactLineageSubgraphRequest, - QueryContextLineageSubgraphRequest, - QueryExecutionInputsAndOutputsRequest, - UpdateArtifactRequest, - UpdateContextRequest, - UpdateExecutionRequest, -) -from .metadata_store import ( - MetadataStore, -) -from .migratable_resource import ( - MigratableResource, -) -from .migration_service import ( - BatchMigrateResourcesOperationMetadata, - BatchMigrateResourcesRequest, - BatchMigrateResourcesResponse, - MigrateResourceRequest, - MigrateResourceResponse, - SearchMigratableResourcesRequest, - SearchMigratableResourcesResponse, -) -from .model import ( - Model, - ModelContainerSpec, - Port, - PredictSchemata, -) -from .model_deployment_monitoring_job import ( - ModelDeploymentMonitoringBigQueryTable, - ModelDeploymentMonitoringJob, - ModelDeploymentMonitoringObjectiveConfig, - ModelDeploymentMonitoringScheduleConfig, - ModelMonitoringStatsAnomalies, - ModelDeploymentMonitoringObjectiveType, -) -from .model_evaluation import ( - ModelEvaluation, -) -from .model_evaluation_slice import ( - ModelEvaluationSlice, -) -from .model_monitoring import ( - ModelMonitoringAlertConfig, - ModelMonitoringObjectiveConfig, - SamplingStrategy, - ThresholdConfig, -) -from .model_service import ( - DeleteModelRequest, - ExportModelOperationMetadata, - ExportModelRequest, - ExportModelResponse, - GetModelEvaluationRequest, - GetModelEvaluationSliceRequest, - GetModelRequest, - ListModelEvaluationSlicesRequest, - ListModelEvaluationSlicesResponse, - ListModelEvaluationsRequest, - ListModelEvaluationsResponse, - ListModelsRequest, - ListModelsResponse, - UpdateModelRequest, - UploadModelOperationMetadata, - UploadModelRequest, - UploadModelResponse, -) -from .operation import ( - DeleteOperationMetadata, - GenericOperationMetadata, -) -from .pipeline_job import ( - PipelineJob, - PipelineJobDetail, - PipelineTaskDetail, - 
PipelineTaskExecutorDetail, -) -from .pipeline_service import ( - CancelPipelineJobRequest, - CancelTrainingPipelineRequest, - CreatePipelineJobRequest, - CreateTrainingPipelineRequest, - DeletePipelineJobRequest, - DeleteTrainingPipelineRequest, - GetPipelineJobRequest, - GetTrainingPipelineRequest, - ListPipelineJobsRequest, - ListPipelineJobsResponse, - ListTrainingPipelinesRequest, - ListTrainingPipelinesResponse, -) -from .prediction_service import ( - ExplainRequest, - ExplainResponse, - PredictRequest, - PredictResponse, - RawPredictRequest, -) -from .specialist_pool import ( - SpecialistPool, -) -from .specialist_pool_service import ( - CreateSpecialistPoolOperationMetadata, - CreateSpecialistPoolRequest, - DeleteSpecialistPoolRequest, - GetSpecialistPoolRequest, - ListSpecialistPoolsRequest, - ListSpecialistPoolsResponse, - UpdateSpecialistPoolOperationMetadata, - UpdateSpecialistPoolRequest, -) -from .study import ( - Measurement, - Study, - StudySpec, - Trial, -) -from .tensorboard import ( - Tensorboard, -) -from .tensorboard_data import ( - Scalar, - TensorboardBlob, - TensorboardBlobSequence, - TensorboardTensor, - TimeSeriesData, - TimeSeriesDataPoint, -) -from .tensorboard_experiment import ( - TensorboardExperiment, -) -from .tensorboard_run import ( - TensorboardRun, -) -from .tensorboard_service import ( - BatchCreateTensorboardRunsRequest, - BatchCreateTensorboardRunsResponse, - BatchCreateTensorboardTimeSeriesRequest, - BatchCreateTensorboardTimeSeriesResponse, - BatchReadTensorboardTimeSeriesDataRequest, - BatchReadTensorboardTimeSeriesDataResponse, - CreateTensorboardExperimentRequest, - CreateTensorboardOperationMetadata, - CreateTensorboardRequest, - CreateTensorboardRunRequest, - CreateTensorboardTimeSeriesRequest, - DeleteTensorboardExperimentRequest, - DeleteTensorboardRequest, - DeleteTensorboardRunRequest, - DeleteTensorboardTimeSeriesRequest, - ExportTensorboardTimeSeriesDataRequest, - ExportTensorboardTimeSeriesDataResponse, - 
GetTensorboardExperimentRequest, - GetTensorboardRequest, - GetTensorboardRunRequest, - GetTensorboardTimeSeriesRequest, - ListTensorboardExperimentsRequest, - ListTensorboardExperimentsResponse, - ListTensorboardRunsRequest, - ListTensorboardRunsResponse, - ListTensorboardsRequest, - ListTensorboardsResponse, - ListTensorboardTimeSeriesRequest, - ListTensorboardTimeSeriesResponse, - ReadTensorboardBlobDataRequest, - ReadTensorboardBlobDataResponse, - ReadTensorboardTimeSeriesDataRequest, - ReadTensorboardTimeSeriesDataResponse, - UpdateTensorboardExperimentRequest, - UpdateTensorboardOperationMetadata, - UpdateTensorboardRequest, - UpdateTensorboardRunRequest, - UpdateTensorboardTimeSeriesRequest, - WriteTensorboardExperimentDataRequest, - WriteTensorboardExperimentDataResponse, - WriteTensorboardRunDataRequest, - WriteTensorboardRunDataResponse, -) -from .tensorboard_time_series import ( - TensorboardTimeSeries, -) -from .training_pipeline import ( - FilterSplit, - FractionSplit, - InputDataConfig, - PredefinedSplit, - StratifiedSplit, - TimestampSplit, - TrainingPipeline, -) -from .types import ( - BoolArray, - DoubleArray, - Int64Array, - StringArray, -) -from .unmanaged_container_model import ( - UnmanagedContainerModel, -) -from .user_action_reference import ( - UserActionReference, -) -from .value import ( - Value, -) -from .vizier_service import ( - AddTrialMeasurementRequest, - CheckTrialEarlyStoppingStateMetatdata, - CheckTrialEarlyStoppingStateRequest, - CheckTrialEarlyStoppingStateResponse, - CompleteTrialRequest, - CreateStudyRequest, - CreateTrialRequest, - DeleteStudyRequest, - DeleteTrialRequest, - GetStudyRequest, - GetTrialRequest, - ListOptimalTrialsRequest, - ListOptimalTrialsResponse, - ListStudiesRequest, - ListStudiesResponse, - ListTrialsRequest, - ListTrialsResponse, - LookupStudyRequest, - StopTrialRequest, - SuggestTrialsMetadata, - SuggestTrialsRequest, - SuggestTrialsResponse, -) - -__all__ = ( - 'AcceleratorType', - 'Annotation', - 
'AnnotationSpec', - 'Artifact', - 'BatchPredictionJob', - 'CompletionStats', - 'Context', - 'ContainerSpec', - 'CustomJob', - 'CustomJobSpec', - 'PythonPackageSpec', - 'Scheduling', - 'WorkerPoolSpec', - 'DataItem', - 'ActiveLearningConfig', - 'DataLabelingJob', - 'SampleConfig', - 'TrainingConfig', - 'Dataset', - 'ExportDataConfig', - 'ImportDataConfig', - 'CreateDatasetOperationMetadata', - 'CreateDatasetRequest', - 'DeleteDatasetRequest', - 'ExportDataOperationMetadata', - 'ExportDataRequest', - 'ExportDataResponse', - 'GetAnnotationSpecRequest', - 'GetDatasetRequest', - 'ImportDataOperationMetadata', - 'ImportDataRequest', - 'ImportDataResponse', - 'ListAnnotationsRequest', - 'ListAnnotationsResponse', - 'ListDataItemsRequest', - 'ListDataItemsResponse', - 'ListDatasetsRequest', - 'ListDatasetsResponse', - 'UpdateDatasetRequest', - 'DeployedIndexRef', - 'DeployedModelRef', - 'EncryptionSpec', - 'DeployedModel', - 'Endpoint', - 'PrivateEndpoints', - 'CreateEndpointOperationMetadata', - 'CreateEndpointRequest', - 'DeleteEndpointRequest', - 'DeployModelOperationMetadata', - 'DeployModelRequest', - 'DeployModelResponse', - 'GetEndpointRequest', - 'ListEndpointsRequest', - 'ListEndpointsResponse', - 'UndeployModelOperationMetadata', - 'UndeployModelRequest', - 'UndeployModelResponse', - 'UpdateEndpointRequest', - 'EntityType', - 'EnvVar', - 'Event', - 'Execution', - 'Attribution', - 'BlurBaselineConfig', - 'Explanation', - 'ExplanationMetadataOverride', - 'ExplanationParameters', - 'ExplanationSpec', - 'ExplanationSpecOverride', - 'FeatureNoiseSigma', - 'IntegratedGradientsAttribution', - 'ModelExplanation', - 'SampledShapleyAttribution', - 'Similarity', - 'SmoothGradConfig', - 'XraiAttribution', - 'ExplanationMetadata', - 'Feature', - 'FeatureStatsAnomaly', - 'FeatureSelector', - 'IdMatcher', - 'Featurestore', - 'FeaturestoreMonitoringConfig', - 'FeatureValue', - 'FeatureValueList', - 'ReadFeatureValuesRequest', - 'ReadFeatureValuesResponse', - 
'StreamingReadFeatureValuesRequest', - 'BatchCreateFeaturesOperationMetadata', - 'BatchCreateFeaturesRequest', - 'BatchCreateFeaturesResponse', - 'BatchReadFeatureValuesOperationMetadata', - 'BatchReadFeatureValuesRequest', - 'BatchReadFeatureValuesResponse', - 'CreateEntityTypeOperationMetadata', - 'CreateEntityTypeRequest', - 'CreateFeatureOperationMetadata', - 'CreateFeatureRequest', - 'CreateFeaturestoreOperationMetadata', - 'CreateFeaturestoreRequest', - 'DeleteEntityTypeRequest', - 'DeleteFeatureRequest', - 'DeleteFeaturestoreRequest', - 'DestinationFeatureSetting', - 'ExportFeatureValuesOperationMetadata', - 'ExportFeatureValuesRequest', - 'ExportFeatureValuesResponse', - 'FeatureValueDestination', - 'GetEntityTypeRequest', - 'GetFeatureRequest', - 'GetFeaturestoreRequest', - 'ImportFeatureValuesOperationMetadata', - 'ImportFeatureValuesRequest', - 'ImportFeatureValuesResponse', - 'ListEntityTypesRequest', - 'ListEntityTypesResponse', - 'ListFeaturesRequest', - 'ListFeaturesResponse', - 'ListFeaturestoresRequest', - 'ListFeaturestoresResponse', - 'SearchFeaturesRequest', - 'SearchFeaturesResponse', - 'UpdateEntityTypeRequest', - 'UpdateFeatureRequest', - 'UpdateFeaturestoreOperationMetadata', - 'UpdateFeaturestoreRequest', - 'HyperparameterTuningJob', - 'Index', - 'DeployedIndex', - 'DeployedIndexAuthConfig', - 'IndexEndpoint', - 'IndexPrivateEndpoints', - 'CreateIndexEndpointOperationMetadata', - 'CreateIndexEndpointRequest', - 'DeleteIndexEndpointRequest', - 'DeployIndexOperationMetadata', - 'DeployIndexRequest', - 'DeployIndexResponse', - 'GetIndexEndpointRequest', - 'ListIndexEndpointsRequest', - 'ListIndexEndpointsResponse', - 'MutateDeployedIndexOperationMetadata', - 'MutateDeployedIndexRequest', - 'MutateDeployedIndexResponse', - 'UndeployIndexOperationMetadata', - 'UndeployIndexRequest', - 'UndeployIndexResponse', - 'UpdateIndexEndpointRequest', - 'CreateIndexOperationMetadata', - 'CreateIndexRequest', - 'DeleteIndexRequest', - 'GetIndexRequest', - 
'ListIndexesRequest', - 'ListIndexesResponse', - 'NearestNeighborSearchOperationMetadata', - 'UpdateIndexOperationMetadata', - 'UpdateIndexRequest', - 'AvroSource', - 'BigQueryDestination', - 'BigQuerySource', - 'ContainerRegistryDestination', - 'CsvDestination', - 'CsvSource', - 'GcsDestination', - 'GcsSource', - 'TFRecordDestination', - 'CancelBatchPredictionJobRequest', - 'CancelCustomJobRequest', - 'CancelDataLabelingJobRequest', - 'CancelHyperparameterTuningJobRequest', - 'CreateBatchPredictionJobRequest', - 'CreateCustomJobRequest', - 'CreateDataLabelingJobRequest', - 'CreateHyperparameterTuningJobRequest', - 'CreateModelDeploymentMonitoringJobRequest', - 'DeleteBatchPredictionJobRequest', - 'DeleteCustomJobRequest', - 'DeleteDataLabelingJobRequest', - 'DeleteHyperparameterTuningJobRequest', - 'DeleteModelDeploymentMonitoringJobRequest', - 'GetBatchPredictionJobRequest', - 'GetCustomJobRequest', - 'GetDataLabelingJobRequest', - 'GetHyperparameterTuningJobRequest', - 'GetModelDeploymentMonitoringJobRequest', - 'ListBatchPredictionJobsRequest', - 'ListBatchPredictionJobsResponse', - 'ListCustomJobsRequest', - 'ListCustomJobsResponse', - 'ListDataLabelingJobsRequest', - 'ListDataLabelingJobsResponse', - 'ListHyperparameterTuningJobsRequest', - 'ListHyperparameterTuningJobsResponse', - 'ListModelDeploymentMonitoringJobsRequest', - 'ListModelDeploymentMonitoringJobsResponse', - 'PauseModelDeploymentMonitoringJobRequest', - 'ResumeModelDeploymentMonitoringJobRequest', - 'SearchModelDeploymentMonitoringStatsAnomaliesRequest', - 'SearchModelDeploymentMonitoringStatsAnomaliesResponse', - 'UpdateModelDeploymentMonitoringJobOperationMetadata', - 'UpdateModelDeploymentMonitoringJobRequest', - 'JobState', - 'LineageSubgraph', - 'AutomaticResources', - 'AutoscalingMetricSpec', - 'BatchDedicatedResources', - 'DedicatedResources', - 'DiskSpec', - 'MachineSpec', - 'ResourcesConsumed', - 'ManualBatchTuningParameters', - 'MetadataSchema', - 
'AddContextArtifactsAndExecutionsRequest', - 'AddContextArtifactsAndExecutionsResponse', - 'AddContextChildrenRequest', - 'AddContextChildrenResponse', - 'AddExecutionEventsRequest', - 'AddExecutionEventsResponse', - 'CreateArtifactRequest', - 'CreateContextRequest', - 'CreateExecutionRequest', - 'CreateMetadataSchemaRequest', - 'CreateMetadataStoreOperationMetadata', - 'CreateMetadataStoreRequest', - 'DeleteArtifactRequest', - 'DeleteContextRequest', - 'DeleteExecutionRequest', - 'DeleteMetadataStoreOperationMetadata', - 'DeleteMetadataStoreRequest', - 'GetArtifactRequest', - 'GetContextRequest', - 'GetExecutionRequest', - 'GetMetadataSchemaRequest', - 'GetMetadataStoreRequest', - 'ListArtifactsRequest', - 'ListArtifactsResponse', - 'ListContextsRequest', - 'ListContextsResponse', - 'ListExecutionsRequest', - 'ListExecutionsResponse', - 'ListMetadataSchemasRequest', - 'ListMetadataSchemasResponse', - 'ListMetadataStoresRequest', - 'ListMetadataStoresResponse', - 'PurgeArtifactsMetadata', - 'PurgeArtifactsRequest', - 'PurgeArtifactsResponse', - 'PurgeContextsMetadata', - 'PurgeContextsRequest', - 'PurgeContextsResponse', - 'PurgeExecutionsMetadata', - 'PurgeExecutionsRequest', - 'PurgeExecutionsResponse', - 'QueryArtifactLineageSubgraphRequest', - 'QueryContextLineageSubgraphRequest', - 'QueryExecutionInputsAndOutputsRequest', - 'UpdateArtifactRequest', - 'UpdateContextRequest', - 'UpdateExecutionRequest', - 'MetadataStore', - 'MigratableResource', - 'BatchMigrateResourcesOperationMetadata', - 'BatchMigrateResourcesRequest', - 'BatchMigrateResourcesResponse', - 'MigrateResourceRequest', - 'MigrateResourceResponse', - 'SearchMigratableResourcesRequest', - 'SearchMigratableResourcesResponse', - 'Model', - 'ModelContainerSpec', - 'Port', - 'PredictSchemata', - 'ModelDeploymentMonitoringBigQueryTable', - 'ModelDeploymentMonitoringJob', - 'ModelDeploymentMonitoringObjectiveConfig', - 'ModelDeploymentMonitoringScheduleConfig', - 'ModelMonitoringStatsAnomalies', - 
'ModelDeploymentMonitoringObjectiveType', - 'ModelEvaluation', - 'ModelEvaluationSlice', - 'ModelMonitoringAlertConfig', - 'ModelMonitoringObjectiveConfig', - 'SamplingStrategy', - 'ThresholdConfig', - 'DeleteModelRequest', - 'ExportModelOperationMetadata', - 'ExportModelRequest', - 'ExportModelResponse', - 'GetModelEvaluationRequest', - 'GetModelEvaluationSliceRequest', - 'GetModelRequest', - 'ListModelEvaluationSlicesRequest', - 'ListModelEvaluationSlicesResponse', - 'ListModelEvaluationsRequest', - 'ListModelEvaluationsResponse', - 'ListModelsRequest', - 'ListModelsResponse', - 'UpdateModelRequest', - 'UploadModelOperationMetadata', - 'UploadModelRequest', - 'UploadModelResponse', - 'DeleteOperationMetadata', - 'GenericOperationMetadata', - 'PipelineJob', - 'PipelineJobDetail', - 'PipelineTaskDetail', - 'PipelineTaskExecutorDetail', - 'CancelPipelineJobRequest', - 'CancelTrainingPipelineRequest', - 'CreatePipelineJobRequest', - 'CreateTrainingPipelineRequest', - 'DeletePipelineJobRequest', - 'DeleteTrainingPipelineRequest', - 'GetPipelineJobRequest', - 'GetTrainingPipelineRequest', - 'ListPipelineJobsRequest', - 'ListPipelineJobsResponse', - 'ListTrainingPipelinesRequest', - 'ListTrainingPipelinesResponse', - 'PipelineState', - 'ExplainRequest', - 'ExplainResponse', - 'PredictRequest', - 'PredictResponse', - 'RawPredictRequest', - 'SpecialistPool', - 'CreateSpecialistPoolOperationMetadata', - 'CreateSpecialistPoolRequest', - 'DeleteSpecialistPoolRequest', - 'GetSpecialistPoolRequest', - 'ListSpecialistPoolsRequest', - 'ListSpecialistPoolsResponse', - 'UpdateSpecialistPoolOperationMetadata', - 'UpdateSpecialistPoolRequest', - 'Measurement', - 'Study', - 'StudySpec', - 'Trial', - 'Tensorboard', - 'Scalar', - 'TensorboardBlob', - 'TensorboardBlobSequence', - 'TensorboardTensor', - 'TimeSeriesData', - 'TimeSeriesDataPoint', - 'TensorboardExperiment', - 'TensorboardRun', - 'BatchCreateTensorboardRunsRequest', - 'BatchCreateTensorboardRunsResponse', - 
'BatchCreateTensorboardTimeSeriesRequest', - 'BatchCreateTensorboardTimeSeriesResponse', - 'BatchReadTensorboardTimeSeriesDataRequest', - 'BatchReadTensorboardTimeSeriesDataResponse', - 'CreateTensorboardExperimentRequest', - 'CreateTensorboardOperationMetadata', - 'CreateTensorboardRequest', - 'CreateTensorboardRunRequest', - 'CreateTensorboardTimeSeriesRequest', - 'DeleteTensorboardExperimentRequest', - 'DeleteTensorboardRequest', - 'DeleteTensorboardRunRequest', - 'DeleteTensorboardTimeSeriesRequest', - 'ExportTensorboardTimeSeriesDataRequest', - 'ExportTensorboardTimeSeriesDataResponse', - 'GetTensorboardExperimentRequest', - 'GetTensorboardRequest', - 'GetTensorboardRunRequest', - 'GetTensorboardTimeSeriesRequest', - 'ListTensorboardExperimentsRequest', - 'ListTensorboardExperimentsResponse', - 'ListTensorboardRunsRequest', - 'ListTensorboardRunsResponse', - 'ListTensorboardsRequest', - 'ListTensorboardsResponse', - 'ListTensorboardTimeSeriesRequest', - 'ListTensorboardTimeSeriesResponse', - 'ReadTensorboardBlobDataRequest', - 'ReadTensorboardBlobDataResponse', - 'ReadTensorboardTimeSeriesDataRequest', - 'ReadTensorboardTimeSeriesDataResponse', - 'UpdateTensorboardExperimentRequest', - 'UpdateTensorboardOperationMetadata', - 'UpdateTensorboardRequest', - 'UpdateTensorboardRunRequest', - 'UpdateTensorboardTimeSeriesRequest', - 'WriteTensorboardExperimentDataRequest', - 'WriteTensorboardExperimentDataResponse', - 'WriteTensorboardRunDataRequest', - 'WriteTensorboardRunDataResponse', - 'TensorboardTimeSeries', - 'FilterSplit', - 'FractionSplit', - 'InputDataConfig', - 'PredefinedSplit', - 'StratifiedSplit', - 'TimestampSplit', - 'TrainingPipeline', - 'BoolArray', - 'DoubleArray', - 'Int64Array', - 'StringArray', - 'UnmanagedContainerModel', - 'UserActionReference', - 'Value', - 'AddTrialMeasurementRequest', - 'CheckTrialEarlyStoppingStateMetatdata', - 'CheckTrialEarlyStoppingStateRequest', - 'CheckTrialEarlyStoppingStateResponse', - 'CompleteTrialRequest', - 
'CreateStudyRequest', - 'CreateTrialRequest', - 'DeleteStudyRequest', - 'DeleteTrialRequest', - 'GetStudyRequest', - 'GetTrialRequest', - 'ListOptimalTrialsRequest', - 'ListOptimalTrialsResponse', - 'ListStudiesRequest', - 'ListStudiesResponse', - 'ListTrialsRequest', - 'ListTrialsResponse', - 'LookupStudyRequest', - 'StopTrialRequest', - 'SuggestTrialsMetadata', - 'SuggestTrialsRequest', - 'SuggestTrialsResponse', -) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/accelerator_type.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/accelerator_type.py deleted file mode 100644 index 3e2b8a46f4..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/accelerator_type.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'AcceleratorType', - }, -) - - -class AcceleratorType(proto.Enum): - r"""Represents a hardware accelerator type.""" - ACCELERATOR_TYPE_UNSPECIFIED = 0 - NVIDIA_TESLA_K80 = 1 - NVIDIA_TESLA_P100 = 2 - NVIDIA_TESLA_V100 = 3 - NVIDIA_TESLA_P4 = 4 - NVIDIA_TESLA_T4 = 5 - NVIDIA_TESLA_A100 = 8 - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation.py deleted file mode 100644 index 526bcad4d2..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation.py +++ /dev/null @@ -1,129 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import user_action_reference -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'Annotation', - }, -) - - -class Annotation(proto.Message): - r"""Used to assign specific AnnotationSpec to a particular area - of a DataItem or the whole part of the DataItem. - - Attributes: - name (str): - Output only. Resource name of the Annotation. 
- payload_schema_uri (str): - Required. Google Cloud Storage URI points to a YAML file - describing - [payload][google.cloud.aiplatform.v1beta1.Annotation.payload]. - The schema is defined as an `OpenAPI 3.0.2 Schema - Object `__. - The schema files that can be used here are found in - gs://google-cloud-aiplatform/schema/dataset/annotation/, - note that the chosen schema must be consistent with the - parent Dataset's - [metadata][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri]. - payload (google.protobuf.struct_pb2.Value): - Required. The schema of the payload can be found in - [payload_schema][google.cloud.aiplatform.v1beta1.Annotation.payload_schema_uri]. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Annotation - was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Annotation - was last updated. - etag (str): - Optional. Used to perform consistent read- - odify-write updates. If not set, a blind - "overwrite" update happens. - annotation_source (google.cloud.aiplatform_v1beta1.types.UserActionReference): - Output only. The source of the Annotation. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Annotation.LabelsEntry]): - Optional. The labels with user-defined metadata to organize - your Annotations. - - Label keys and values can be no longer than 64 characters - (Unicode codepoints), can only contain lowercase letters, - numeric characters, underscores and dashes. International - characters are allowed. No more than 64 user labels can be - associated with one Annotation(System labels are excluded). - - See https://goo.gl/xmQnxf for more information and examples - of labels. System reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. 
Following - system labels exist for each Annotation: - - - "aiplatform.googleapis.com/annotation_set_name": - optional, name of the UI's annotation set this Annotation - belongs to. If not set, the Annotation is not visible in - the UI. - - - "aiplatform.googleapis.com/payload_schema": output only, - its value is the - [payload_schema's][google.cloud.aiplatform.v1beta1.Annotation.payload_schema_uri] - title. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - payload_schema_uri = proto.Field( - proto.STRING, - number=2, - ) - payload = proto.Field( - proto.MESSAGE, - number=3, - message=struct_pb2.Value, - ) - create_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - etag = proto.Field( - proto.STRING, - number=8, - ) - annotation_source = proto.Field( - proto.MESSAGE, - number=5, - message=user_action_reference.UserActionReference, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=6, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation_spec.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation_spec.py deleted file mode 100644 index a254682a5c..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/annotation_spec.py +++ /dev/null @@ -1,78 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'AnnotationSpec', - }, -) - - -class AnnotationSpec(proto.Message): - r"""Identifies a concept with which DataItems may be annotated - with. - - Attributes: - name (str): - Output only. Resource name of the - AnnotationSpec. - display_name (str): - Required. The user-defined name of the - AnnotationSpec. The name can be up to 128 - characters long and can be consist of any UTF-8 - characters. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - AnnotationSpec was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when AnnotationSpec - was last updated. - etag (str): - Optional. Used to perform consistent read- - odify-write updates. If not set, a blind - "overwrite" update happens. 
- """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - create_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - etag = proto.Field( - proto.STRING, - number=5, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/artifact.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/artifact.py deleted file mode 100644 index b1f51ec173..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/artifact.py +++ /dev/null @@ -1,153 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'Artifact', - }, -) - - -class Artifact(proto.Message): - r"""Instance of a general artifact. - - Attributes: - name (str): - Output only. The resource name of the - Artifact. - display_name (str): - User provided display name of the Artifact. - May be up to 128 Unicode characters. - uri (str): - The uniform resource identifier of the - artifact file. 
May be empty if there is no - actual artifact file. - etag (str): - An eTag used to perform consistent read- - odify-write updates. If not set, a blind - "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Artifact.LabelsEntry]): - The labels with user-defined metadata to - organize your Artifacts. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. No more than 64 user labels can be - associated with one Artifact (System labels are - excluded). - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Artifact was - created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Artifact was - last updated. - state (google.cloud.aiplatform_v1beta1.types.Artifact.State): - The state of this Artifact. This is a - property of the Artifact, and does not imply or - capture any ongoing process. This property is - managed by clients (such as Vertex AI - Pipelines), and the system does not prescribe or - check the validity of state transitions. - schema_title (str): - The title of the schema describing the - metadata. - Schema title and version is expected to be - registered in earlier Create Schema calls. And - both are used together as unique identifiers to - identify schemas within the local metadata - store. - schema_version (str): - The version of the schema in schema_name to use. - - Schema title and version is expected to be registered in - earlier Create Schema calls. And both are used together as - unique identifiers to identify schemas within the local - metadata store. - metadata (google.protobuf.struct_pb2.Struct): - Properties of the Artifact. - The size of this field should not exceed 200KB. 
- description (str): - Description of the Artifact - """ - class State(proto.Enum): - r"""Describes the state of the Artifact.""" - STATE_UNSPECIFIED = 0 - PENDING = 1 - LIVE = 2 - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - uri = proto.Field( - proto.STRING, - number=6, - ) - etag = proto.Field( - proto.STRING, - number=9, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=10, - ) - create_time = proto.Field( - proto.MESSAGE, - number=11, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=12, - message=timestamp_pb2.Timestamp, - ) - state = proto.Field( - proto.ENUM, - number=13, - enum=State, - ) - schema_title = proto.Field( - proto.STRING, - number=14, - ) - schema_version = proto.Field( - proto.STRING, - number=15, - ) - metadata = proto.Field( - proto.MESSAGE, - number=16, - message=struct_pb2.Struct, - ) - description = proto.Field( - proto.STRING, - number=17, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py deleted file mode 100644 index e0fd340b58..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py +++ /dev/null @@ -1,502 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import completion_stats as gca_completion_stats -from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1beta1.types import explanation -from google.cloud.aiplatform_v1beta1.types import io -from google.cloud.aiplatform_v1beta1.types import job_state -from google.cloud.aiplatform_v1beta1.types import machine_resources -from google.cloud.aiplatform_v1beta1.types import manual_batch_tuning_parameters as gca_manual_batch_tuning_parameters -from google.cloud.aiplatform_v1beta1.types import unmanaged_container_model as gca_unmanaged_container_model -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'BatchPredictionJob', - }, -) - - -class BatchPredictionJob(proto.Message): - r"""A job that uses a - [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to - produce predictions on multiple [input - instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. - If predictions for significant portion of the instances fail, the - job may finish without attempting predictions for all remaining - instances. - - Attributes: - name (str): - Output only. Resource name of the - BatchPredictionJob. - display_name (str): - Required. The user-defined name of this - BatchPredictionJob. - model (str): - The name of the Model resoure that produces the predictions - via this job, must share the same ancestor Location. - Starting this job has no impact on any existing deployments - of the Model and their resources. Exactly one of model and - unmanaged_container_model must be set. 
- unmanaged_container_model (google.cloud.aiplatform_v1beta1.types.UnmanagedContainerModel): - Contains model information necessary to perform batch - prediction without requiring uploading to model registry. - Exactly one of model and unmanaged_container_model must be - set. - input_config (google.cloud.aiplatform_v1beta1.types.BatchPredictionJob.InputConfig): - Required. Input configuration of the instances on which - predictions are performed. The schema of any single instance - may be specified via the - [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. - model_parameters (google.protobuf.struct_pb2.Value): - The parameters that govern the predictions. The schema of - the parameters may be specified via the - [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. - output_config (google.cloud.aiplatform_v1beta1.types.BatchPredictionJob.OutputConfig): - Required. The Configuration specifying where output - predictions should be written. The schema of any single - prediction may be specified as a concatenation of - [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] - and - [prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri]. - dedicated_resources (google.cloud.aiplatform_v1beta1.types.BatchDedicatedResources): - The config of resources used by the Model during the batch - prediction. 
If the Model - [supports][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types] - DEDICATED_RESOURCES this config may be provided (and the job - will use these resources), if the Model doesn't support - AUTOMATIC_RESOURCES, this config must be provided. - manual_batch_tuning_parameters (google.cloud.aiplatform_v1beta1.types.ManualBatchTuningParameters): - Immutable. Parameters configuring the batch behavior. - Currently only applicable when - [dedicated_resources][google.cloud.aiplatform.v1beta1.BatchPredictionJob.dedicated_resources] - are used (in other cases Vertex AI does the tuning itself). - generate_explanation (bool): - Generate explanation with the batch prediction results. - - When set to ``true``, the batch prediction output changes - based on the ``predictions_format`` field of the - [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config] - object: - - - ``bigquery``: output includes a column named - ``explanation``. The value is a struct that conforms to - the - [Explanation][google.cloud.aiplatform.v1beta1.Explanation] - object. - - ``jsonl``: The JSON objects on each line include an - additional entry keyed ``explanation``. The value of the - entry is a JSON object that conforms to the - [Explanation][google.cloud.aiplatform.v1beta1.Explanation] - object. - - ``csv``: Generating explanations for CSV format is not - supported. - - If this field is set to true, either the - [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] - or - [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] - must be populated. - explanation_spec (google.cloud.aiplatform_v1beta1.types.ExplanationSpec): - Explanation configuration for this BatchPredictionJob. Can - be specified only if - [generate_explanation][google.cloud.aiplatform.v1beta1.BatchPredictionJob.generate_explanation] - is set to ``true``. 
- - This value overrides the value of - [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec]. - All fields of - [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] - are optional in the request. If a field of the - [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] - object is not populated, the corresponding field of the - [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] - object is inherited. - output_info (google.cloud.aiplatform_v1beta1.types.BatchPredictionJob.OutputInfo): - Output only. Information further describing - the output of this job. - state (google.cloud.aiplatform_v1beta1.types.JobState): - Output only. The detailed state of the job. - error (google.rpc.status_pb2.Status): - Output only. Only populated when the job's state is - JOB_STATE_FAILED or JOB_STATE_CANCELLED. - partial_failures (Sequence[google.rpc.status_pb2.Status]): - Output only. Partial failures encountered. - For example, single files that can't be read. - This field never exceeds 20 entries. - Status details fields contain standard GCP error - details. - resources_consumed (google.cloud.aiplatform_v1beta1.types.ResourcesConsumed): - Output only. Information about resources that - had been consumed by this job. Provided in real - time at best effort basis, as well as a final - value once the job completes. - - Note: This field currently may be not populated - for batch predictions that use AutoML Models. - completion_stats (google.cloud.aiplatform_v1beta1.types.CompletionStats): - Output only. Statistics on completed and - failed prediction instances. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the BatchPredictionJob - was created. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the BatchPredictionJob for the first - time entered the ``JOB_STATE_RUNNING`` state. 
- end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the BatchPredictionJob entered any of - the following states: ``JOB_STATE_SUCCEEDED``, - ``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the BatchPredictionJob - was most recently updated. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.BatchPredictionJob.LabelsEntry]): - The labels with user-defined metadata to - organize BatchPredictionJobs. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): - Customer-managed encryption key options for a - BatchPredictionJob. If this is set, then all - resources created by the BatchPredictionJob will - be encrypted with the provided encryption key. - """ - - class InputConfig(proto.Message): - r"""Configures the input to - [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. - See - [Model.supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats] - for Model's supported input formats, and how instances should be - expressed via any of them. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): - The Cloud Storage location for the input - instances. - - This field is a member of `oneof`_ ``source``. 
- bigquery_source (google.cloud.aiplatform_v1beta1.types.BigQuerySource): - The BigQuery location of the input table. - The schema of the table should be in the format - described by the given context OpenAPI Schema, - if one is provided. The table may contain - additional columns that are not described by the - schema, and they will be ignored. - - This field is a member of `oneof`_ ``source``. - instances_format (str): - Required. The format in which instances are given, must be - one of the - [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] - [supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats]. - """ - - gcs_source = proto.Field( - proto.MESSAGE, - number=2, - oneof='source', - message=io.GcsSource, - ) - bigquery_source = proto.Field( - proto.MESSAGE, - number=3, - oneof='source', - message=io.BigQuerySource, - ) - instances_format = proto.Field( - proto.STRING, - number=1, - ) - - class OutputConfig(proto.Message): - r"""Configures the output of - [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. - See - [Model.supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats] - for supported output formats, and how predictions are expressed via - any of them. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination): - The Cloud Storage location of the directory where the output - is to be written to. In the given directory a new directory - is created. Its name is - ``prediction--``, where - timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. 
- Inside of it files ``predictions_0001.``, - ``predictions_0002.``, ..., - ``predictions_N.`` are created where - ```` depends on chosen - [predictions_format][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.predictions_format], - and N may equal 0001 and depends on the total number of - successfully predicted instances. If the Model has both - [instance][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] - and - [prediction][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri] - schemata defined then each such file contains predictions as - per the - [predictions_format][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.predictions_format]. - If prediction for any instance failed (partially or - completely), then an additional ``errors_0001.``, - ``errors_0002.``,..., ``errors_N.`` - files are created (N depends on total number of failed - predictions). These files contain the failed instances, as - per their schema, followed by an additional ``error`` field - which as value has [google.rpc.Status][google.rpc.Status] - containing only ``code`` and ``message`` fields. - - This field is a member of `oneof`_ ``destination``. - bigquery_destination (google.cloud.aiplatform_v1beta1.types.BigQueryDestination): - The BigQuery project or dataset location where the output is - to be written to. If project is provided, a new dataset is - created with name - ``prediction__`` where - is made BigQuery-dataset-name compatible (for example, most - special characters become underscores), and timestamp is in - YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the - dataset two tables will be created, ``predictions``, and - ``errors``. 
If the Model has both - [instance][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] - and - [prediction][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri] - schemata defined then the tables have columns as follows: - The ``predictions`` table contains instances for which the - prediction succeeded, it has columns as per a concatenation - of the Model's instance and prediction schemata. The - ``errors`` table contains rows for which the prediction has - failed, it has instance columns, as per the instance schema, - followed by a single "errors" column, which as values has - [google.rpc.Status][google.rpc.Status] represented as a - STRUCT, and containing only ``code`` and ``message``. - - This field is a member of `oneof`_ ``destination``. - predictions_format (str): - Required. The format in which Vertex AI gives the - predictions, must be one of the - [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] - [supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats]. - """ - - gcs_destination = proto.Field( - proto.MESSAGE, - number=2, - oneof='destination', - message=io.GcsDestination, - ) - bigquery_destination = proto.Field( - proto.MESSAGE, - number=3, - oneof='destination', - message=io.BigQueryDestination, - ) - predictions_format = proto.Field( - proto.STRING, - number=1, - ) - - class OutputInfo(proto.Message): - r"""Further describes this job's output. Supplements - [output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config]. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - gcs_output_directory (str): - Output only. 
The full path of the Cloud - Storage directory created, into which the - prediction output is written. - - This field is a member of `oneof`_ ``output_location``. - bigquery_output_dataset (str): - Output only. The path of the BigQuery dataset created, in - ``bq://projectId.bqDatasetId`` format, into which the - prediction output is written. - - This field is a member of `oneof`_ ``output_location``. - bigquery_output_table (str): - Output only. The name of the BigQuery table created, in - ``predictions_`` format, into which the - prediction output is written. Can be used by UI to generate - the BigQuery output path, for example. - """ - - gcs_output_directory = proto.Field( - proto.STRING, - number=1, - oneof='output_location', - ) - bigquery_output_dataset = proto.Field( - proto.STRING, - number=2, - oneof='output_location', - ) - bigquery_output_table = proto.Field( - proto.STRING, - number=4, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - model = proto.Field( - proto.STRING, - number=3, - ) - unmanaged_container_model = proto.Field( - proto.MESSAGE, - number=28, - message=gca_unmanaged_container_model.UnmanagedContainerModel, - ) - input_config = proto.Field( - proto.MESSAGE, - number=4, - message=InputConfig, - ) - model_parameters = proto.Field( - proto.MESSAGE, - number=5, - message=struct_pb2.Value, - ) - output_config = proto.Field( - proto.MESSAGE, - number=6, - message=OutputConfig, - ) - dedicated_resources = proto.Field( - proto.MESSAGE, - number=7, - message=machine_resources.BatchDedicatedResources, - ) - manual_batch_tuning_parameters = proto.Field( - proto.MESSAGE, - number=8, - message=gca_manual_batch_tuning_parameters.ManualBatchTuningParameters, - ) - generate_explanation = proto.Field( - proto.BOOL, - number=23, - ) - explanation_spec = proto.Field( - proto.MESSAGE, - number=25, - message=explanation.ExplanationSpec, - ) - output_info = proto.Field( - proto.MESSAGE, - 
number=9, - message=OutputInfo, - ) - state = proto.Field( - proto.ENUM, - number=10, - enum=job_state.JobState, - ) - error = proto.Field( - proto.MESSAGE, - number=11, - message=status_pb2.Status, - ) - partial_failures = proto.RepeatedField( - proto.MESSAGE, - number=12, - message=status_pb2.Status, - ) - resources_consumed = proto.Field( - proto.MESSAGE, - number=13, - message=machine_resources.ResourcesConsumed, - ) - completion_stats = proto.Field( - proto.MESSAGE, - number=14, - message=gca_completion_stats.CompletionStats, - ) - create_time = proto.Field( - proto.MESSAGE, - number=15, - message=timestamp_pb2.Timestamp, - ) - start_time = proto.Field( - proto.MESSAGE, - number=16, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=17, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=18, - message=timestamp_pb2.Timestamp, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=19, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=24, - message=gca_encryption_spec.EncryptionSpec, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/completion_stats.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/completion_stats.py deleted file mode 100644 index 3d8055f95a..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/completion_stats.py +++ /dev/null @@ -1,63 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'CompletionStats', - }, -) - - -class CompletionStats(proto.Message): - r"""Success and error statistics of processing multiple entities - (for example, DataItems or structured data rows) in batch. - - Attributes: - successful_count (int): - Output only. The number of entities that had - been processed successfully. - failed_count (int): - Output only. The number of entities for which - any error was encountered. - incomplete_count (int): - Output only. In cases when enough errors are - encountered a job, pipeline, or operation may be - failed as a whole. Below is the number of - entities for which the processing had not been - finished (either in successful or failed state). - Set to -1 if the number is unknown (for example, - the operation failed before the total entity - number could be collected). 
- """ - - successful_count = proto.Field( - proto.INT64, - number=1, - ) - failed_count = proto.Field( - proto.INT64, - number=2, - ) - incomplete_count = proto.Field( - proto.INT64, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/context.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/context.py deleted file mode 100644 index 057098dfa0..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/context.py +++ /dev/null @@ -1,136 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'Context', - }, -) - - -class Context(proto.Message): - r"""Instance of a general context. - - Attributes: - name (str): - Output only. The resource name of the - Context. - display_name (str): - User provided display name of the Context. - May be up to 128 Unicode characters. - etag (str): - An eTag used to perform consistent read- - odify-write updates. If not set, a blind - "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Context.LabelsEntry]): - The labels with user-defined metadata to - organize your Contexts. 
- Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. No more than 64 user labels can be - associated with one Context (System labels are - excluded). - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Context was - created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Context was - last updated. - parent_contexts (Sequence[str]): - Output only. A list of resource names of Contexts that are - parents of this Context. A Context may have at most 10 - parent_contexts. - schema_title (str): - The title of the schema describing the - metadata. - Schema title and version is expected to be - registered in earlier Create Schema calls. And - both are used together as unique identifiers to - identify schemas within the local metadata - store. - schema_version (str): - The version of the schema in schema_name to use. - - Schema title and version is expected to be registered in - earlier Create Schema calls. And both are used together as - unique identifiers to identify schemas within the local - metadata store. - metadata (google.protobuf.struct_pb2.Struct): - Properties of the Context. - The size of this field should not exceed 200KB. 
- description (str): - Description of the Context - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - etag = proto.Field( - proto.STRING, - number=8, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=9, - ) - create_time = proto.Field( - proto.MESSAGE, - number=10, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=11, - message=timestamp_pb2.Timestamp, - ) - parent_contexts = proto.RepeatedField( - proto.STRING, - number=12, - ) - schema_title = proto.Field( - proto.STRING, - number=13, - ) - schema_version = proto.Field( - proto.STRING, - number=14, - ) - metadata = proto.Field( - proto.MESSAGE, - number=15, - message=struct_pb2.Struct, - ) - description = proto.Field( - proto.STRING, - number=16, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/custom_job.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/custom_job.py deleted file mode 100644 index 3e6c77881c..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/custom_job.py +++ /dev/null @@ -1,438 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1beta1.types import io -from google.cloud.aiplatform_v1beta1.types import job_state -from google.cloud.aiplatform_v1beta1.types import machine_resources -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'CustomJob', - 'CustomJobSpec', - 'WorkerPoolSpec', - 'ContainerSpec', - 'PythonPackageSpec', - 'Scheduling', - }, -) - - -class CustomJob(proto.Message): - r"""Represents a job that runs custom workloads such as a Docker - container or a Python package. A CustomJob can have multiple - worker pools and each worker pool can have its own machine and - input spec. A CustomJob will be cleaned up once the job enters - terminal state (failed or succeeded). - - Attributes: - name (str): - Output only. Resource name of a CustomJob. - display_name (str): - Required. The display name of the CustomJob. - The name can be up to 128 characters long and - can be consist of any UTF-8 characters. - job_spec (google.cloud.aiplatform_v1beta1.types.CustomJobSpec): - Required. Job spec. - state (google.cloud.aiplatform_v1beta1.types.JobState): - Output only. The detailed state of the job. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the CustomJob was - created. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the CustomJob for the first time - entered the ``JOB_STATE_RUNNING`` state. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the CustomJob entered any of the - following states: ``JOB_STATE_SUCCEEDED``, - ``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. 
Time when the CustomJob was most - recently updated. - error (google.rpc.status_pb2.Status): - Output only. Only populated when job's state is - ``JOB_STATE_FAILED`` or ``JOB_STATE_CANCELLED``. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.CustomJob.LabelsEntry]): - The labels with user-defined metadata to - organize CustomJobs. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): - Customer-managed encryption key options for a - CustomJob. If this is set, then all resources - created by the CustomJob will be encrypted with - the provided encryption key. - web_access_uris (Sequence[google.cloud.aiplatform_v1beta1.types.CustomJob.WebAccessUrisEntry]): - Output only. URIs for accessing `interactive - shells `__ - (one URI for each training node). Only available if - [job_spec.enable_web_access][google.cloud.aiplatform.v1beta1.CustomJobSpec.enable_web_access] - is ``true``. - - The keys are names of each node in the training job; for - example, ``workerpool0-0`` for the primary node, - ``workerpool1-0`` for the first node in the second worker - pool, and ``workerpool1-1`` for the second node in the - second worker pool. - - The values are the URIs for each node's interactive shell. 
- """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - job_spec = proto.Field( - proto.MESSAGE, - number=4, - message='CustomJobSpec', - ) - state = proto.Field( - proto.ENUM, - number=5, - enum=job_state.JobState, - ) - create_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - start_time = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=8, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=9, - message=timestamp_pb2.Timestamp, - ) - error = proto.Field( - proto.MESSAGE, - number=10, - message=status_pb2.Status, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=11, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=12, - message=gca_encryption_spec.EncryptionSpec, - ) - web_access_uris = proto.MapField( - proto.STRING, - proto.STRING, - number=16, - ) - - -class CustomJobSpec(proto.Message): - r"""Represents the spec of a CustomJob. - - Attributes: - worker_pool_specs (Sequence[google.cloud.aiplatform_v1beta1.types.WorkerPoolSpec]): - Required. The spec of the worker pools - including machine type and Docker image. All - worker pools except the first one are optional - and can be skipped by providing an empty value. - scheduling (google.cloud.aiplatform_v1beta1.types.Scheduling): - Scheduling options for a CustomJob. - service_account (str): - Specifies the service account for workload run-as account. - Users submitting jobs must have act-as permission on this - run-as account. If unspecified, the `Vertex AI Custom Code - Service - Agent `__ - for the CustomJob's project is used. - network (str): - The full name of the Compute Engine - `network `__ - to which the Job should be peered. For example, - ``projects/12345/global/networks/myVPC``. 
- `Format `__ - is of the form - ``projects/{project}/global/networks/{network}``. Where - {project} is a project number, as in ``12345``, and - {network} is a network name. - - To specify this field, you must have already `configured VPC - Network Peering for Vertex - AI `__. - - If this field is left unspecified, the job is not peered - with any network. - base_output_directory (google.cloud.aiplatform_v1beta1.types.GcsDestination): - The Cloud Storage location to store the output of this - CustomJob or HyperparameterTuningJob. For - HyperparameterTuningJob, the baseOutputDirectory of each - child CustomJob backing a Trial is set to a subdirectory of - name [id][google.cloud.aiplatform.v1beta1.Trial.id] under - its parent HyperparameterTuningJob's baseOutputDirectory. - - The following Vertex AI environment variables will be passed - to containers or python modules when this field is set: - - For CustomJob: - - - AIP_MODEL_DIR = ``/model/`` - - AIP_CHECKPOINT_DIR = - ``/checkpoints/`` - - AIP_TENSORBOARD_LOG_DIR = - ``/logs/`` - - For CustomJob backing a Trial of HyperparameterTuningJob: - - - AIP_MODEL_DIR = - ``//model/`` - - AIP_CHECKPOINT_DIR = - ``//checkpoints/`` - - AIP_TENSORBOARD_LOG_DIR = - ``//logs/`` - tensorboard (str): - Optional. The name of a Vertex AI - [Tensorboard][google.cloud.aiplatform.v1beta1.Tensorboard] - resource to which this CustomJob will upload Tensorboard - logs. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - enable_web_access (bool): - Optional. Whether you want Vertex AI to enable `interactive - shell - access `__ - to training containers. 
- - If set to ``true``, you can access interactive shells at the - URIs given by - [CustomJob.web_access_uris][google.cloud.aiplatform.v1beta1.CustomJob.web_access_uris] - or - [Trial.web_access_uris][google.cloud.aiplatform.v1beta1.Trial.web_access_uris] - (within - [HyperparameterTuningJob.trials][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.trials]). - """ - - worker_pool_specs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='WorkerPoolSpec', - ) - scheduling = proto.Field( - proto.MESSAGE, - number=3, - message='Scheduling', - ) - service_account = proto.Field( - proto.STRING, - number=4, - ) - network = proto.Field( - proto.STRING, - number=5, - ) - base_output_directory = proto.Field( - proto.MESSAGE, - number=6, - message=io.GcsDestination, - ) - tensorboard = proto.Field( - proto.STRING, - number=7, - ) - enable_web_access = proto.Field( - proto.BOOL, - number=10, - ) - - -class WorkerPoolSpec(proto.Message): - r"""Represents the spec of a worker pool in a job. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - container_spec (google.cloud.aiplatform_v1beta1.types.ContainerSpec): - The custom container task. - - This field is a member of `oneof`_ ``task``. - python_package_spec (google.cloud.aiplatform_v1beta1.types.PythonPackageSpec): - The Python packaged task. - - This field is a member of `oneof`_ ``task``. - machine_spec (google.cloud.aiplatform_v1beta1.types.MachineSpec): - Optional. Immutable. The specification of a - single machine. - replica_count (int): - Optional. The number of worker replicas to - use for this worker pool. - disk_spec (google.cloud.aiplatform_v1beta1.types.DiskSpec): - Disk spec. 
- """ - - container_spec = proto.Field( - proto.MESSAGE, - number=6, - oneof='task', - message='ContainerSpec', - ) - python_package_spec = proto.Field( - proto.MESSAGE, - number=7, - oneof='task', - message='PythonPackageSpec', - ) - machine_spec = proto.Field( - proto.MESSAGE, - number=1, - message=machine_resources.MachineSpec, - ) - replica_count = proto.Field( - proto.INT64, - number=2, - ) - disk_spec = proto.Field( - proto.MESSAGE, - number=5, - message=machine_resources.DiskSpec, - ) - - -class ContainerSpec(proto.Message): - r"""The spec of a Container. - - Attributes: - image_uri (str): - Required. The URI of a container image in the - Container Registry that is to be run on each - worker replica. - command (Sequence[str]): - The command to be invoked when the container - is started. It overrides the entrypoint - instruction in Dockerfile when provided. - args (Sequence[str]): - The arguments to be passed when starting the - container. - """ - - image_uri = proto.Field( - proto.STRING, - number=1, - ) - command = proto.RepeatedField( - proto.STRING, - number=2, - ) - args = proto.RepeatedField( - proto.STRING, - number=3, - ) - - -class PythonPackageSpec(proto.Message): - r"""The spec of a Python packaged code. - - Attributes: - executor_image_uri (str): - Required. The URI of a container image in Artifact Registry - that will run the provided Python package. Vertex AI - provides a wide range of executor images with pre-installed - packages to meet users' various use cases. See the list of - `pre-built containers for - training `__. - You must use an image from this list. - package_uris (Sequence[str]): - Required. The Google Cloud Storage location - of the Python package files which are the - training program and its dependent packages. The - maximum number of package URIs is 100. - python_module (str): - Required. The Python module name to run after - installing the packages. 
- args (Sequence[str]): - Command line arguments to be passed to the - Python task. - """ - - executor_image_uri = proto.Field( - proto.STRING, - number=1, - ) - package_uris = proto.RepeatedField( - proto.STRING, - number=2, - ) - python_module = proto.Field( - proto.STRING, - number=3, - ) - args = proto.RepeatedField( - proto.STRING, - number=4, - ) - - -class Scheduling(proto.Message): - r"""All parameters related to queuing and scheduling of custom - jobs. - - Attributes: - timeout (google.protobuf.duration_pb2.Duration): - The maximum job running time. The default is - 7 days. - restart_job_on_worker_restart (bool): - Restarts the entire CustomJob if a worker - gets restarted. This feature can be used by - distributed training jobs that are not resilient - to workers leaving and joining a job. - """ - - timeout = proto.Field( - proto.MESSAGE, - number=1, - message=duration_pb2.Duration, - ) - restart_job_on_worker_restart = proto.Field( - proto.BOOL, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_item.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_item.py deleted file mode 100644 index c638c0e00d..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_item.py +++ /dev/null @@ -1,101 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'DataItem', - }, -) - - -class DataItem(proto.Message): - r"""A piece of data in a Dataset. Could be an image, a video, a - document or plain text. - - Attributes: - name (str): - Output only. The resource name of the - DataItem. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this DataItem was - created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this DataItem was - last updated. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.DataItem.LabelsEntry]): - Optional. The labels with user-defined - metadata to organize your DataItems. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. No more than 64 user labels can be - associated with one DataItem(System labels are - excluded). - - See https://goo.gl/xmQnxf for more information - and examples of labels. System reserved label - keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. - payload (google.protobuf.struct_pb2.Value): - Required. The data that the DataItem represents (for - example, an image or a text snippet). The schema of the - payload is stored in the parent Dataset's [metadata - schema's][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri] - dataItemSchemaUri field. - etag (str): - Optional. Used to perform consistent read- - odify-write updates. If not set, a blind - "overwrite" update happens. 
- """ - - name = proto.Field( - proto.STRING, - number=1, - ) - create_time = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=3, - ) - payload = proto.Field( - proto.MESSAGE, - number=4, - message=struct_pb2.Value, - ) - etag = proto.Field( - proto.STRING, - number=7, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py deleted file mode 100644 index c80925db0c..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/data_labeling_job.py +++ /dev/null @@ -1,350 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1beta1.types import job_state -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from google.type import money_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'DataLabelingJob', - 'ActiveLearningConfig', - 'SampleConfig', - 'TrainingConfig', - }, -) - - -class DataLabelingJob(proto.Message): - r"""DataLabelingJob is used to trigger a human labeling job on - unlabeled data from the following Dataset: - - Attributes: - name (str): - Output only. Resource name of the - DataLabelingJob. - display_name (str): - Required. The user-defined name of the - DataLabelingJob. The name can be up to 128 - characters long and can be consist of any UTF-8 - characters. - Display name of a DataLabelingJob. - datasets (Sequence[str]): - Required. Dataset resource names. Right now we only support - labeling from a single Dataset. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - annotation_labels (Sequence[google.cloud.aiplatform_v1beta1.types.DataLabelingJob.AnnotationLabelsEntry]): - Labels to assign to annotations generated by - this DataLabelingJob. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. See https://goo.gl/xmQnxf for more - information and examples of labels. System - reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. - labeler_count (int): - Required. Number of labelers to work on each - DataItem. - instruction_uri (str): - Required. The Google Cloud Storage location - of the instruction pdf. 
This pdf is shared with - labelers, and provides detailed description on - how to label DataItems in Datasets. - inputs_schema_uri (str): - Required. Points to a YAML file stored on - Google Cloud Storage describing the config for a - specific type of DataLabelingJob. The schema - files that can be used here are found in the - https://storage.googleapis.com/google-cloud- - aiplatform bucket in the - /schema/datalabelingjob/inputs/ folder. - inputs (google.protobuf.struct_pb2.Value): - Required. Input config parameters for the - DataLabelingJob. - state (google.cloud.aiplatform_v1beta1.types.JobState): - Output only. The detailed state of the job. - labeling_progress (int): - Output only. Current labeling job progress percentage scaled - in interval [0, 100], indicating the percentage of DataItems - that has been finished. - current_spend (google.type.money_pb2.Money): - Output only. Estimated cost(in US dollars) - that the DataLabelingJob has incurred to date. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - DataLabelingJob was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - DataLabelingJob was updated most recently. - error (google.rpc.status_pb2.Status): - Output only. DataLabelingJob errors. It is only populated - when job's state is ``JOB_STATE_FAILED`` or - ``JOB_STATE_CANCELLED``. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.DataLabelingJob.LabelsEntry]): - The labels with user-defined metadata to organize your - DataLabelingJobs. - - Label keys and values can be no longer than 64 characters - (Unicode codepoints), can only contain lowercase letters, - numeric characters, underscores and dashes. International - characters are allowed. - - See https://goo.gl/xmQnxf for more information and examples - of labels. System reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. 
Following - system labels exist for each DataLabelingJob: - - - "aiplatform.googleapis.com/schema": output only, its - value is the - [inputs_schema][google.cloud.aiplatform.v1beta1.DataLabelingJob.inputs_schema_uri]'s - title. - specialist_pools (Sequence[str]): - The SpecialistPools' resource names - associated with this job. - encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): - Customer-managed encryption key spec for a - DataLabelingJob. If set, this DataLabelingJob - will be secured by this key. - Note: Annotations created in the DataLabelingJob - are associated with the EncryptionSpec of the - Dataset they are exported to. - active_learning_config (google.cloud.aiplatform_v1beta1.types.ActiveLearningConfig): - Parameters that configure the active learning - pipeline. Active learning will label the data - incrementally via several iterations. For every - iteration, it will select a batch of data based - on the sampling strategy. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - datasets = proto.RepeatedField( - proto.STRING, - number=3, - ) - annotation_labels = proto.MapField( - proto.STRING, - proto.STRING, - number=12, - ) - labeler_count = proto.Field( - proto.INT32, - number=4, - ) - instruction_uri = proto.Field( - proto.STRING, - number=5, - ) - inputs_schema_uri = proto.Field( - proto.STRING, - number=6, - ) - inputs = proto.Field( - proto.MESSAGE, - number=7, - message=struct_pb2.Value, - ) - state = proto.Field( - proto.ENUM, - number=8, - enum=job_state.JobState, - ) - labeling_progress = proto.Field( - proto.INT32, - number=13, - ) - current_spend = proto.Field( - proto.MESSAGE, - number=14, - message=money_pb2.Money, - ) - create_time = proto.Field( - proto.MESSAGE, - number=9, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=10, - message=timestamp_pb2.Timestamp, - ) - error = proto.Field( - proto.MESSAGE, 
- number=22, - message=status_pb2.Status, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=11, - ) - specialist_pools = proto.RepeatedField( - proto.STRING, - number=16, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=20, - message=gca_encryption_spec.EncryptionSpec, - ) - active_learning_config = proto.Field( - proto.MESSAGE, - number=21, - message='ActiveLearningConfig', - ) - - -class ActiveLearningConfig(proto.Message): - r"""Parameters that configure the active learning pipeline. - Active learning will label the data incrementally by several - iterations. For every iteration, it will select a batch of data - based on the sampling strategy. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - max_data_item_count (int): - Max number of human labeled DataItems. - - This field is a member of `oneof`_ ``human_labeling_budget``. - max_data_item_percentage (int): - Max percent of total DataItems for human - labeling. - - This field is a member of `oneof`_ ``human_labeling_budget``. - sample_config (google.cloud.aiplatform_v1beta1.types.SampleConfig): - Active learning data sampling config. For - every active learning labeling iteration, it - will select a batch of data based on the - sampling strategy. - training_config (google.cloud.aiplatform_v1beta1.types.TrainingConfig): - CMLE training config. For every active - learning labeling iteration, system will train a - machine learning model on CMLE. The trained - model will be used by data sampling algorithm to - select DataItems. 
- """ - - max_data_item_count = proto.Field( - proto.INT64, - number=1, - oneof='human_labeling_budget', - ) - max_data_item_percentage = proto.Field( - proto.INT32, - number=2, - oneof='human_labeling_budget', - ) - sample_config = proto.Field( - proto.MESSAGE, - number=3, - message='SampleConfig', - ) - training_config = proto.Field( - proto.MESSAGE, - number=4, - message='TrainingConfig', - ) - - -class SampleConfig(proto.Message): - r"""Active learning data sampling config. For every active - learning labeling iteration, it will select a batch of data - based on the sampling strategy. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - initial_batch_sample_percentage (int): - The percentage of data needed to be labeled - in the first batch. - - This field is a member of `oneof`_ ``initial_batch_sample_size``. - following_batch_sample_percentage (int): - The percentage of data needed to be labeled - in each following batch (except the first - batch). - - This field is a member of `oneof`_ ``following_batch_sample_size``. - sample_strategy (google.cloud.aiplatform_v1beta1.types.SampleConfig.SampleStrategy): - Field to choose sampling strategy. Sampling - strategy will decide which data should be - selected for human labeling in every batch. - """ - class SampleStrategy(proto.Enum): - r"""Sample strategy decides which subset of DataItems should be - selected for human labeling in every batch. - """ - SAMPLE_STRATEGY_UNSPECIFIED = 0 - UNCERTAINTY = 1 - - initial_batch_sample_percentage = proto.Field( - proto.INT32, - number=1, - oneof='initial_batch_sample_size', - ) - following_batch_sample_percentage = proto.Field( - proto.INT32, - number=3, - oneof='following_batch_sample_size', - ) - sample_strategy = proto.Field( - proto.ENUM, - number=5, - enum=SampleStrategy, - ) - - -class TrainingConfig(proto.Message): - r"""CMLE training config. 
For every active learning labeling - iteration, system will train a machine learning model on CMLE. - The trained model will be used by data sampling algorithm to - select DataItems. - - Attributes: - timeout_training_milli_hours (int): - The timeout hours for the CMLE training job, - expressed in milli hours i.e. 1,000 value in - this field means 1 hour. - """ - - timeout_training_milli_hours = proto.Field( - proto.INT64, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset.py deleted file mode 100644 index d261c27a3d..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset.py +++ /dev/null @@ -1,237 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1beta1.types import io -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'Dataset', - 'ImportDataConfig', - 'ExportDataConfig', - }, -) - - -class Dataset(proto.Message): - r"""A collection of DataItems and Annotations on them. 
- - Attributes: - name (str): - Output only. The resource name of the - Dataset. - display_name (str): - Required. The user-defined name of the - Dataset. The name can be up to 128 characters - long and can be consist of any UTF-8 characters. - description (str): - Optional. The description of the Dataset. - metadata_schema_uri (str): - Required. Points to a YAML file stored on - Google Cloud Storage describing additional - information about the Dataset. The schema is - defined as an OpenAPI 3.0.2 Schema Object. The - schema files that can be used here are found in - gs://google-cloud- - aiplatform/schema/dataset/metadata/. - metadata (google.protobuf.struct_pb2.Value): - Required. Additional information about the - Dataset. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Dataset was - created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Dataset was - last updated. - etag (str): - Used to perform consistent read-modify-write - updates. If not set, a blind "overwrite" update - happens. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Dataset.LabelsEntry]): - The labels with user-defined metadata to organize your - Datasets. - - Label keys and values can be no longer than 64 characters - (Unicode codepoints), can only contain lowercase letters, - numeric characters, underscores and dashes. International - characters are allowed. No more than 64 user labels can be - associated with one Dataset (System labels are excluded). - - See https://goo.gl/xmQnxf for more information and examples - of labels. System reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. Following - system labels exist for each Dataset: - - - "aiplatform.googleapis.com/dataset_metadata_schema": - output only, its value is the - [metadata_schema's][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri] - title. 
- encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): - Customer-managed encryption key spec for a - Dataset. If set, this Dataset and all sub- - resources of this Dataset will be secured by - this key. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=16, - ) - metadata_schema_uri = proto.Field( - proto.STRING, - number=3, - ) - metadata = proto.Field( - proto.MESSAGE, - number=8, - message=struct_pb2.Value, - ) - create_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - etag = proto.Field( - proto.STRING, - number=6, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=7, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=11, - message=gca_encryption_spec.EncryptionSpec, - ) - - -class ImportDataConfig(proto.Message): - r"""Describes the location from where we import data into a - Dataset, together with the labels that will be applied to the - DataItems and the Annotations. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): - The Google Cloud Storage location for the - input content. - - This field is a member of `oneof`_ ``source``. - data_item_labels (Sequence[google.cloud.aiplatform_v1beta1.types.ImportDataConfig.DataItemLabelsEntry]): - Labels that will be applied to newly imported DataItems. If - an identical DataItem as one being imported already exists - in the Dataset, then these labels will be appended to these - of the already existing one, and if labels with identical - key is imported before, the old label value will be - overwritten. 
If two DataItems are identical in the same - import data operation, the labels will be combined and if - key collision happens in this case, one of the values will - be picked randomly. Two DataItems are considered identical - if their content bytes are identical (e.g. image bytes or - pdf bytes). These labels will be overridden by Annotation - labels specified inside index file referenced by - [import_schema_uri][google.cloud.aiplatform.v1beta1.ImportDataConfig.import_schema_uri], - e.g. jsonl file. - import_schema_uri (str): - Required. Points to a YAML file stored on Google Cloud - Storage describing the import format. Validation will be - done against the schema. The schema is defined as an - `OpenAPI 3.0.2 Schema - Object `__. - """ - - gcs_source = proto.Field( - proto.MESSAGE, - number=1, - oneof='source', - message=io.GcsSource, - ) - data_item_labels = proto.MapField( - proto.STRING, - proto.STRING, - number=2, - ) - import_schema_uri = proto.Field( - proto.STRING, - number=4, - ) - - -class ExportDataConfig(proto.Message): - r"""Describes what part of the Dataset is to be exported, the - destination of the export and how to export. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination): - The Google Cloud Storage location where the output is to be - written to. In the given directory a new directory will be - created with name: - ``export-data--`` - where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 - format. All export output will be written into that - directory. Inside that directory, annotations with the same - schema will be grouped into sub directories which are named - with the corresponding annotations' schema title. Inside - these sub directories, a schema.yaml will be created to - describe the output format. - - This field is a member of `oneof`_ ``destination``. 
- annotations_filter (str): - A filter on Annotations of the Dataset. Only Annotations on - to-be-exported DataItems(specified by [data_items_filter][]) - that match this filter will be exported. The filter syntax - is the same as in - [ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. - """ - - gcs_destination = proto.Field( - proto.MESSAGE, - number=1, - oneof='destination', - message=io.GcsDestination, - ) - annotations_filter = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset_service.py deleted file mode 100644 index 4d68fd5dcf..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/dataset_service.py +++ /dev/null @@ -1,543 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import annotation -from google.cloud.aiplatform_v1beta1.types import data_item -from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset -from google.cloud.aiplatform_v1beta1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'CreateDatasetRequest', - 'CreateDatasetOperationMetadata', - 'GetDatasetRequest', - 'UpdateDatasetRequest', - 'ListDatasetsRequest', - 'ListDatasetsResponse', - 'DeleteDatasetRequest', - 'ImportDataRequest', - 'ImportDataResponse', - 'ImportDataOperationMetadata', - 'ExportDataRequest', - 'ExportDataResponse', - 'ExportDataOperationMetadata', - 'ListDataItemsRequest', - 'ListDataItemsResponse', - 'GetAnnotationSpecRequest', - 'ListAnnotationsRequest', - 'ListAnnotationsResponse', - }, -) - - -class CreateDatasetRequest(proto.Message): - r"""Request message for - [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - Dataset in. Format: - ``projects/{project}/locations/{location}`` - dataset (google.cloud.aiplatform_v1beta1.types.Dataset): - Required. The Dataset to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - dataset = proto.Field( - proto.MESSAGE, - number=2, - message=gca_dataset.Dataset, - ) - - -class CreateDatasetOperationMetadata(proto.Message): - r"""Runtime operation information for - [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The operation generic information. 
- """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class GetDatasetRequest(proto.Message): - r"""Request message for - [DatasetService.GetDataset][google.cloud.aiplatform.v1beta1.DatasetService.GetDataset]. - - Attributes: - name (str): - Required. The name of the Dataset resource. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class UpdateDatasetRequest(proto.Message): - r"""Request message for - [DatasetService.UpdateDataset][google.cloud.aiplatform.v1beta1.DatasetService.UpdateDataset]. - - Attributes: - dataset (google.cloud.aiplatform_v1beta1.types.Dataset): - Required. The Dataset which replaces the - resource on the server. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. For the - ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - Updatable fields: - - - ``display_name`` - - ``description`` - - ``labels`` - """ - - dataset = proto.Field( - proto.MESSAGE, - number=1, - message=gca_dataset.Dataset, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class ListDatasetsRequest(proto.Message): - r"""Request message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. - - Attributes: - parent (str): - Required. The name of the Dataset's parent resource. Format: - ``projects/{project}/locations/{location}`` - filter (str): - An expression for filtering the results of the request. For - field names both snake_case and camelCase are supported. 
- - - ``display_name``: supports = and != - - ``metadata_schema_uri``: supports = and != - - ``labels`` supports general map functions that is: - - - ``labels.key=value`` - key:value equality - - \`labels.key:\* or labels:key - key existence - - A key including a space must be quoted. - ``labels."a key"``. - - Some examples: - - - ``displayName="myDisplayName"`` - - ``labels.myKey="myValue"`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - order_by (str): - A comma-separated list of fields to order by, sorted in - ascending order. Use "desc" after a field name for - descending. Supported fields: - - - ``display_name`` - - ``create_time`` - - ``update_time`` - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - order_by = proto.Field( - proto.STRING, - number=6, - ) - - -class ListDatasetsResponse(proto.Message): - r"""Response message for - [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets]. - - Attributes: - datasets (Sequence[google.cloud.aiplatform_v1beta1.types.Dataset]): - A list of Datasets that matches the specified - filter in the request. - next_page_token (str): - The standard List next-page token. - """ - - @property - def raw_page(self): - return self - - datasets = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_dataset.Dataset, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteDatasetRequest(proto.Message): - r"""Request message for - [DatasetService.DeleteDataset][google.cloud.aiplatform.v1beta1.DatasetService.DeleteDataset]. 
- - Attributes: - name (str): - Required. The resource name of the Dataset to delete. - Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ImportDataRequest(proto.Message): - r"""Request message for - [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. - - Attributes: - name (str): - Required. The name of the Dataset resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - import_configs (Sequence[google.cloud.aiplatform_v1beta1.types.ImportDataConfig]): - Required. The desired input locations. The - contents of all input locations will be imported - in one batch. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - import_configs = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=gca_dataset.ImportDataConfig, - ) - - -class ImportDataResponse(proto.Message): - r"""Response message for - [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. - - """ - - -class ImportDataOperationMetadata(proto.Message): - r"""Runtime operation information for - [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The common part of the operation metadata. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class ExportDataRequest(proto.Message): - r"""Request message for - [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. - - Attributes: - name (str): - Required. The name of the Dataset resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - export_config (google.cloud.aiplatform_v1beta1.types.ExportDataConfig): - Required. The desired output location. 
- """ - - name = proto.Field( - proto.STRING, - number=1, - ) - export_config = proto.Field( - proto.MESSAGE, - number=2, - message=gca_dataset.ExportDataConfig, - ) - - -class ExportDataResponse(proto.Message): - r"""Response message for - [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. - - Attributes: - exported_files (Sequence[str]): - All of the files that are exported in this - export operation. - """ - - exported_files = proto.RepeatedField( - proto.STRING, - number=1, - ) - - -class ExportDataOperationMetadata(proto.Message): - r"""Runtime operation information for - [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The common part of the operation metadata. - gcs_output_directory (str): - A Google Cloud Storage directory which path - ends with '/'. The exported data is stored in - the directory. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - gcs_output_directory = proto.Field( - proto.STRING, - number=2, - ) - - -class ListDataItemsRequest(proto.Message): - r"""Request message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. - - Attributes: - parent (str): - Required. The resource name of the Dataset to list DataItems - from. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}`` - filter (str): - The standard list filter. - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - order_by (str): - A comma-separated list of fields to order by, - sorted in ascending order. Use "desc" after a - field name for descending. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - order_by = proto.Field( - proto.STRING, - number=6, - ) - - -class ListDataItemsResponse(proto.Message): - r"""Response message for - [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. - - Attributes: - data_items (Sequence[google.cloud.aiplatform_v1beta1.types.DataItem]): - A list of DataItems that matches the - specified filter in the request. - next_page_token (str): - The standard List next-page token. - """ - - @property - def raw_page(self): - return self - - data_items = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=data_item.DataItem, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class GetAnnotationSpecRequest(proto.Message): - r"""Request message for - [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1beta1.DatasetService.GetAnnotationSpec]. - - Attributes: - name (str): - Required. The name of the AnnotationSpec resource. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}`` - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class ListAnnotationsRequest(proto.Message): - r"""Request message for - [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. - - Attributes: - parent (str): - Required. The resource name of the DataItem to list - Annotations from. 
Format: - ``projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}`` - filter (str): - The standard list filter. - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - order_by (str): - A comma-separated list of fields to order by, - sorted in ascending order. Use "desc" after a - field name for descending. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - order_by = proto.Field( - proto.STRING, - number=6, - ) - - -class ListAnnotationsResponse(proto.Message): - r"""Response message for - [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. - - Attributes: - annotations (Sequence[google.cloud.aiplatform_v1beta1.types.Annotation]): - A list of Annotations that matches the - specified filter in the request. - next_page_token (str): - The standard List next-page token. 
- """ - - @property - def raw_page(self): - return self - - annotations = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=annotation.Annotation, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py deleted file mode 100644 index 89661922d3..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_index_ref.py +++ /dev/null @@ -1,49 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'DeployedIndexRef', - }, -) - - -class DeployedIndexRef(proto.Message): - r"""Points to a DeployedIndex. - - Attributes: - index_endpoint (str): - Immutable. A resource name of the - IndexEndpoint. - deployed_index_id (str): - Immutable. The ID of the DeployedIndex in the - above IndexEndpoint. 
- """ - - index_endpoint = proto.Field( - proto.STRING, - number=1, - ) - deployed_index_id = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_model_ref.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_model_ref.py deleted file mode 100644 index 992500fcac..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/deployed_model_ref.py +++ /dev/null @@ -1,48 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'DeployedModelRef', - }, -) - - -class DeployedModelRef(proto.Message): - r"""Points to a DeployedModel. - - Attributes: - endpoint (str): - Immutable. A resource name of an Endpoint. - deployed_model_id (str): - Immutable. An ID of a DeployedModel in the - above Endpoint. 
- """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - deployed_model_id = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/encryption_spec.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/encryption_spec.py deleted file mode 100644 index ad7e6df830..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/encryption_spec.py +++ /dev/null @@ -1,47 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'EncryptionSpec', - }, -) - - -class EncryptionSpec(proto.Message): - r"""Represents a customer-managed encryption key spec that can be - applied to a top-level resource. - - Attributes: - kms_key_name (str): - Required. The Cloud KMS resource identifier of the customer - managed encryption key used to protect a resource. Has the - form: - ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. - The key needs to be in the same region as where the compute - resource is created. 
- """ - - kms_key_name = proto.Field( - proto.STRING, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint.py deleted file mode 100644 index 9dbc3785d6..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint.py +++ /dev/null @@ -1,371 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1beta1.types import explanation -from google.cloud.aiplatform_v1beta1.types import machine_resources -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'Endpoint', - 'DeployedModel', - 'PrivateEndpoints', - }, -) - - -class Endpoint(proto.Message): - r"""Models are deployed into it, and afterwards Endpoint is - called to obtain predictions and explanations. - - Attributes: - name (str): - Output only. The resource name of the - Endpoint. - display_name (str): - Required. The display name of the Endpoint. - The name can be up to 128 characters long and - can be consist of any UTF-8 characters. - description (str): - The description of the Endpoint. 
- deployed_models (Sequence[google.cloud.aiplatform_v1beta1.types.DeployedModel]): - Output only. The models deployed in this Endpoint. To add or - remove DeployedModels use - [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel] - and - [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel] - respectively. - traffic_split (Sequence[google.cloud.aiplatform_v1beta1.types.Endpoint.TrafficSplitEntry]): - A map from a DeployedModel's ID to the - percentage of this Endpoint's traffic that - should be forwarded to that DeployedModel. - If a DeployedModel's ID is not listed in this - map, then it receives no traffic. - - The traffic percentage values must add up to - 100, or map must be empty if the Endpoint is to - not accept any traffic at a moment. - etag (str): - Used to perform consistent read-modify-write - updates. If not set, a blind "overwrite" update - happens. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Endpoint.LabelsEntry]): - The labels with user-defined metadata to - organize your Endpoints. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Endpoint was - created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Endpoint was - last updated. - encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): - Customer-managed encryption key spec for an - Endpoint. If set, this Endpoint and all sub- - resources of this Endpoint will be secured by - this key. - network (str): - The full name of the Google Compute Engine - `network `__ - to which the Endpoint should be peered. 
- - Private services access must already be configured for the - network. If left unspecified, the Endpoint is not peered - with any network. - - Only one of the fields, - [network][google.cloud.aiplatform.v1beta1.Endpoint.network] - or - [enable_private_service_connect][google.cloud.aiplatform.v1beta1.Endpoint.enable_private_service_connect], - can be set. - - `Format `__: - ``projects/{project}/global/networks/{network}``. Where - ``{project}`` is a project number, as in ``12345``, and - ``{network}`` is network name. - enable_private_service_connect (bool): - If true, expose the Endpoint via private service connect. - - Only one of the fields, - [network][google.cloud.aiplatform.v1beta1.Endpoint.network] - or - [enable_private_service_connect][google.cloud.aiplatform.v1beta1.Endpoint.enable_private_service_connect], - can be set. - model_deployment_monitoring_job (str): - Output only. Resource name of the Model Monitoring job - associated with this Endpoint if monitoring is enabled by - [CreateModelDeploymentMonitoringJob][]. 
Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=3, - ) - deployed_models = proto.RepeatedField( - proto.MESSAGE, - number=4, - message='DeployedModel', - ) - traffic_split = proto.MapField( - proto.STRING, - proto.INT32, - number=5, - ) - etag = proto.Field( - proto.STRING, - number=6, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=7, - ) - create_time = proto.Field( - proto.MESSAGE, - number=8, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=9, - message=timestamp_pb2.Timestamp, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=10, - message=gca_encryption_spec.EncryptionSpec, - ) - network = proto.Field( - proto.STRING, - number=13, - ) - enable_private_service_connect = proto.Field( - proto.BOOL, - number=17, - ) - model_deployment_monitoring_job = proto.Field( - proto.STRING, - number=14, - ) - - -class DeployedModel(proto.Message): - r"""A deployment of a Model. Endpoints contain one or more - DeployedModels. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - dedicated_resources (google.cloud.aiplatform_v1beta1.types.DedicatedResources): - A description of resources that are dedicated - to the DeployedModel, and that need a higher - degree of manual configuration. - - This field is a member of `oneof`_ ``prediction_resources``. 
- automatic_resources (google.cloud.aiplatform_v1beta1.types.AutomaticResources): - A description of resources that to large - degree are decided by Vertex AI, and require - only a modest additional configuration. - - This field is a member of `oneof`_ ``prediction_resources``. - id (str): - Immutable. The ID of the DeployedModel. If not provided upon - deployment, Vertex AI will generate a value for this ID. - - This value should be 1-10 characters, and valid characters - are /[0-9]/. - model (str): - Required. The name of the Model that this is - the deployment of. Note that the Model may be in - a different location than the DeployedModel's - Endpoint. - display_name (str): - The display name of the DeployedModel. If not provided upon - creation, the Model's display_name is used. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when the DeployedModel - was created. - explanation_spec (google.cloud.aiplatform_v1beta1.types.ExplanationSpec): - Explanation configuration for this DeployedModel. - - When deploying a Model using - [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel], - this value overrides the value of - [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec]. - All fields of - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] - are optional in the request. If a field of - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] - is not populated, the value of the same field of - [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] - is inherited. If the corresponding - [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] - is not populated, all fields of the - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] - will be used for the explanation configuration. 
- service_account (str): - The service account that the DeployedModel's container runs - as. Specify the email address of the service account. If - this service account is not specified, the container runs as - a service account that doesn't have access to the resource - project. - - Users deploying the Model must have the - ``iam.serviceAccounts.actAs`` permission on this service - account. - enable_container_logging (bool): - If true, the container of the DeployedModel instances will - send ``stderr`` and ``stdout`` streams to Stackdriver - Logging. - - Only supported for custom-trained Models and AutoML Tabular - Models. - enable_access_logging (bool): - These logs are like standard server access - logs, containing information like timestamp and - latency for each prediction request. - Note that Stackdriver logs may incur a cost, - especially if your project receives prediction - requests at a high queries per second rate - (QPS). Estimate your costs before enabling this - option. - private_endpoints (google.cloud.aiplatform_v1beta1.types.PrivateEndpoints): - Output only. Provide paths for users to send - predict/explain/health requests directly to the deployed - model services running on Cloud via private services access. - This field is populated if - [network][google.cloud.aiplatform.v1beta1.Endpoint.network] - is configured. 
- """ - - dedicated_resources = proto.Field( - proto.MESSAGE, - number=7, - oneof='prediction_resources', - message=machine_resources.DedicatedResources, - ) - automatic_resources = proto.Field( - proto.MESSAGE, - number=8, - oneof='prediction_resources', - message=machine_resources.AutomaticResources, - ) - id = proto.Field( - proto.STRING, - number=1, - ) - model = proto.Field( - proto.STRING, - number=2, - ) - display_name = proto.Field( - proto.STRING, - number=3, - ) - create_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - explanation_spec = proto.Field( - proto.MESSAGE, - number=9, - message=explanation.ExplanationSpec, - ) - service_account = proto.Field( - proto.STRING, - number=11, - ) - enable_container_logging = proto.Field( - proto.BOOL, - number=12, - ) - enable_access_logging = proto.Field( - proto.BOOL, - number=13, - ) - private_endpoints = proto.Field( - proto.MESSAGE, - number=14, - message='PrivateEndpoints', - ) - - -class PrivateEndpoints(proto.Message): - r"""PrivateEndpoints proto is used to provide paths for users to send - requests privately. To send request via private service access, use - predict_http_uri, explain_http_uri or health_http_uri. To send - request via private service connect, use service_attachment. - - Attributes: - predict_http_uri (str): - Output only. Http(s) path to send prediction - requests. - explain_http_uri (str): - Output only. Http(s) path to send explain - requests. - health_http_uri (str): - Output only. Http(s) path to send health - check requests. - service_attachment (str): - Output only. The name of the service - attachment resource. Populated if private - service connect is enabled. 
- """ - - predict_http_uri = proto.Field( - proto.STRING, - number=1, - ) - explain_http_uri = proto.Field( - proto.STRING, - number=2, - ) - health_http_uri = proto.Field( - proto.STRING, - number=3, - ) - service_attachment = proto.Field( - proto.STRING, - number=4, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint_service.py deleted file mode 100644 index 19ffc389a0..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/endpoint_service.py +++ /dev/null @@ -1,395 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint -from google.cloud.aiplatform_v1beta1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'CreateEndpointRequest', - 'CreateEndpointOperationMetadata', - 'GetEndpointRequest', - 'ListEndpointsRequest', - 'ListEndpointsResponse', - 'UpdateEndpointRequest', - 'DeleteEndpointRequest', - 'DeployModelRequest', - 'DeployModelResponse', - 'DeployModelOperationMetadata', - 'UndeployModelRequest', - 'UndeployModelResponse', - 'UndeployModelOperationMetadata', - }, -) - - -class CreateEndpointRequest(proto.Message): - r"""Request message for - [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - Endpoint in. Format: - ``projects/{project}/locations/{location}`` - endpoint (google.cloud.aiplatform_v1beta1.types.Endpoint): - Required. The Endpoint to create. - endpoint_id (str): - Immutable. The ID to use for endpoint, which will become the - final component of the endpoint resource name. If not - provided, Vertex AI will generate a value for this ID. - - This value should be 1-10 characters, and valid characters - are /[0-9]/. When using HTTP/JSON, this field is populated - based on a query string argument, such as - ``?endpoint_id=12345``. This is the fallback for fields that - are not included in either the URI or the body. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - endpoint = proto.Field( - proto.MESSAGE, - number=2, - message=gca_endpoint.Endpoint, - ) - endpoint_id = proto.Field( - proto.STRING, - number=4, - ) - - -class CreateEndpointOperationMetadata(proto.Message): - r"""Runtime operation information for - [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The operation generic information. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class GetEndpointRequest(proto.Message): - r"""Request message for - [EndpointService.GetEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.GetEndpoint] - - Attributes: - name (str): - Required. The name of the Endpoint resource. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListEndpointsRequest(proto.Message): - r"""Request message for - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. - - Attributes: - parent (str): - Required. The resource name of the Location from which to - list the Endpoints. Format: - ``projects/{project}/locations/{location}`` - filter (str): - Optional. An expression for filtering the results of the - request. For field names both snake_case and camelCase are - supported. - - - ``endpoint`` supports = and !=. ``endpoint`` represents - the Endpoint ID, i.e. the last segment of the Endpoint's - [resource - name][google.cloud.aiplatform.v1beta1.Endpoint.name]. - - ``display_name`` supports = and, != - - ``labels`` supports general map functions that is: - - - ``labels.key=value`` - key:value equality - - \`labels.key:\* or labels:key - key existence - - A key including a space must be quoted. - ``labels."a key"``. 
- - Some examples: - - - ``endpoint=1`` - - ``displayName="myDisplayName"`` - - ``labels.myKey="myValue"`` - page_size (int): - Optional. The standard list page size. - page_token (str): - Optional. The standard list page token. Typically obtained - via - [ListEndpointsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListEndpointsResponse.next_page_token] - of the previous - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Optional. Mask specifying which fields to - read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListEndpointsResponse(proto.Message): - r"""Response message for - [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints]. - - Attributes: - endpoints (Sequence[google.cloud.aiplatform_v1beta1.types.Endpoint]): - List of Endpoints in the requested page. - next_page_token (str): - A token to retrieve the next page of results. Pass to - [ListEndpointsRequest.page_token][google.cloud.aiplatform.v1beta1.ListEndpointsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - endpoints = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_endpoint.Endpoint, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateEndpointRequest(proto.Message): - r"""Request message for - [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. - - Attributes: - endpoint (google.cloud.aiplatform_v1beta1.types.Endpoint): - Required. The Endpoint which replaces the - resource on the server. 
- update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. See - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - """ - - endpoint = proto.Field( - proto.MESSAGE, - number=1, - message=gca_endpoint.Endpoint, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeleteEndpointRequest(proto.Message): - r"""Request message for - [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeleteEndpoint]. - - Attributes: - name (str): - Required. The name of the Endpoint resource to be deleted. - Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class DeployModelRequest(proto.Message): - r"""Request message for - [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. - - Attributes: - endpoint (str): - Required. The name of the Endpoint resource into which to - deploy a Model. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - deployed_model (google.cloud.aiplatform_v1beta1.types.DeployedModel): - Required. The DeployedModel to be created within the - Endpoint. Note that - [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] - must be updated for the DeployedModel to start receiving - traffic, either as part of this call, or via - [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint]. - traffic_split (Sequence[google.cloud.aiplatform_v1beta1.types.DeployModelRequest.TrafficSplitEntry]): - A map from a DeployedModel's ID to the percentage of this - Endpoint's traffic that should be forwarded to that - DeployedModel. - - If this field is non-empty, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] - will be overwritten with it. 
To refer to the ID of the just - being deployed Model, a "0" should be used, and the actual - ID of the new DeployedModel will be filled in its place by - this method. The traffic percentage values must add up to - 100. - - If this field is empty, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] - is not updated. - """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - deployed_model = proto.Field( - proto.MESSAGE, - number=2, - message=gca_endpoint.DeployedModel, - ) - traffic_split = proto.MapField( - proto.STRING, - proto.INT32, - number=3, - ) - - -class DeployModelResponse(proto.Message): - r"""Response message for - [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. - - Attributes: - deployed_model (google.cloud.aiplatform_v1beta1.types.DeployedModel): - The DeployedModel that had been deployed in - the Endpoint. - """ - - deployed_model = proto.Field( - proto.MESSAGE, - number=1, - message=gca_endpoint.DeployedModel, - ) - - -class DeployModelOperationMetadata(proto.Message): - r"""Runtime operation information for - [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The operation generic information. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class UndeployModelRequest(proto.Message): - r"""Request message for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. - - Attributes: - endpoint (str): - Required. The name of the Endpoint resource from which to - undeploy a Model. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - deployed_model_id (str): - Required. The ID of the DeployedModel to be - undeployed from the Endpoint. 
- traffic_split (Sequence[google.cloud.aiplatform_v1beta1.types.UndeployModelRequest.TrafficSplitEntry]): - If this field is provided, then the Endpoint's - [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] - will be overwritten with it. If last DeployedModel is being - undeployed from the Endpoint, the [Endpoint.traffic_split] - will always end up empty when this call returns. A - DeployedModel will be successfully undeployed only if it - doesn't have any traffic assigned to it when this method - executes, or if this field unassigns any traffic to it. - """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - deployed_model_id = proto.Field( - proto.STRING, - number=2, - ) - traffic_split = proto.MapField( - proto.STRING, - proto.INT32, - number=3, - ) - - -class UndeployModelResponse(proto.Message): - r"""Response message for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. - - """ - - -class UndeployModelOperationMetadata(proto.Message): - r"""Runtime operation information for - [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The operation generic information. 
- """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/entity_type.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/entity_type.py deleted file mode 100644 index d01ca4d8a8..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/entity_type.py +++ /dev/null @@ -1,118 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'EntityType', - }, -) - - -class EntityType(proto.Message): - r"""An entity type is a type of object in a system that needs to - be modeled and have stored information about. For example, - driver is an entity type, and driver0 is an instance of an - entity type driver. - - Attributes: - name (str): - Immutable. Name of the EntityType. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - - The last part entity_type is assigned by the client. 
The - entity_type can be up to 64 characters long and can consist - only of ASCII Latin letters A-Z and a-z and underscore(_), - and ASCII digits 0-9 starting with a letter. The value will - be unique given a featurestore. - description (str): - Optional. Description of the EntityType. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this EntityType - was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this EntityType - was most recently updated. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.EntityType.LabelsEntry]): - Optional. The labels with user-defined - metadata to organize your EntityTypes. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - on and examples of labels. No more than 64 user - labels can be associated with one EntityType - (System labels are excluded)." - System reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. - etag (str): - Optional. Used to perform a consistent read- - odify-write updates. If not set, a blind - "overwrite" update happens. - monitoring_config (google.cloud.aiplatform_v1beta1.types.FeaturestoreMonitoringConfig): - Optional. The default monitoring configuration for all - Features with value type - ([Feature.ValueType][google.cloud.aiplatform.v1beta1.Feature.ValueType]) - BOOL, STRING, DOUBLE or INT64 under this EntityType. - - If this is populated with - [FeaturestoreMonitoringConfig.monitoring_interval] - specified, snapshot analysis monitoring is enabled. - Otherwise, snapshot analysis monitoring is disabled. 
- """ - - name = proto.Field( - proto.STRING, - number=1, - ) - description = proto.Field( - proto.STRING, - number=2, - ) - create_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=6, - ) - etag = proto.Field( - proto.STRING, - number=7, - ) - monitoring_config = proto.Field( - proto.MESSAGE, - number=8, - message=featurestore_monitoring.FeaturestoreMonitoringConfig, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/env_var.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/env_var.py deleted file mode 100644 index 2775473b9e..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/env_var.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'EnvVar', - }, -) - - -class EnvVar(proto.Message): - r"""Represents an environment variable present in a Container or - Python Module. - - Attributes: - name (str): - Required. Name of the environment variable. - Must be a valid C identifier. - value (str): - Required. 
Variables that reference a $(VAR_NAME) are - expanded using the previous defined environment variables in - the container and any service environment variables. If a - variable cannot be resolved, the reference in the input - string will be unchanged. The $(VAR_NAME) syntax can be - escaped with a double $$, ie: $$(VAR_NAME). Escaped - references will never be expanded, regardless of whether the - variable exists or not. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - value = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/event.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/event.py deleted file mode 100644 index ac1f78d44a..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/event.py +++ /dev/null @@ -1,93 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'Event', - }, -) - - -class Event(proto.Message): - r"""An edge describing the relationship between an Artifact and - an Execution in a lineage graph. - - Attributes: - artifact (str): - Required. The relative resource name of the - Artifact in the Event. - execution (str): - Output only. 
The relative resource name of - the Execution in the Event. - event_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time the Event occurred. - type_ (google.cloud.aiplatform_v1beta1.types.Event.Type): - Required. The type of the Event. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Event.LabelsEntry]): - The labels with user-defined metadata to - annotate Events. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. No more than 64 user labels can be - associated with one Event (System labels are - excluded). - - See https://goo.gl/xmQnxf for more information - and examples of labels. System reserved label - keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. - """ - class Type(proto.Enum): - r"""Describes whether an Event's Artifact is the Execution's - input or output. - """ - TYPE_UNSPECIFIED = 0 - INPUT = 1 - OUTPUT = 2 - - artifact = proto.Field( - proto.STRING, - number=1, - ) - execution = proto.Field( - proto.STRING, - number=2, - ) - event_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - type_ = proto.Field( - proto.ENUM, - number=4, - enum=Type, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=5, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/execution.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/execution.py deleted file mode 100644 index eff996b0d6..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/execution.py +++ /dev/null @@ -1,149 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'Execution', - }, -) - - -class Execution(proto.Message): - r"""Instance of a general execution. - - Attributes: - name (str): - Output only. The resource name of the - Execution. - display_name (str): - User provided display name of the Execution. - May be up to 128 Unicode characters. - state (google.cloud.aiplatform_v1beta1.types.Execution.State): - The state of this Execution. This is a - property of the Execution, and does not imply or - capture any ongoing process. This property is - managed by clients (such as Vertex AI Pipelines) - and the system does not prescribe or check the - validity of state transitions. - etag (str): - An eTag used to perform consistent read- - odify-write updates. If not set, a blind - "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Execution.LabelsEntry]): - The labels with user-defined metadata to - organize your Executions. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. No more than 64 user labels can be - associated with one Execution (System labels are - excluded). - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Execution - was created. 
- update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Execution - was last updated. - schema_title (str): - The title of the schema describing the - metadata. - Schema title and version is expected to be - registered in earlier Create Schema calls. And - both are used together as unique identifiers to - identify schemas within the local metadata - store. - schema_version (str): - The version of the schema in ``schema_title`` to use. - - Schema title and version is expected to be registered in - earlier Create Schema calls. And both are used together as - unique identifiers to identify schemas within the local - metadata store. - metadata (google.protobuf.struct_pb2.Struct): - Properties of the Execution. - The size of this field should not exceed 200KB. - description (str): - Description of the Execution - """ - class State(proto.Enum): - r"""Describes the state of the Execution.""" - STATE_UNSPECIFIED = 0 - NEW = 1 - RUNNING = 2 - COMPLETE = 3 - FAILED = 4 - CACHED = 5 - CANCELLED = 6 - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - state = proto.Field( - proto.ENUM, - number=6, - enum=State, - ) - etag = proto.Field( - proto.STRING, - number=9, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=10, - ) - create_time = proto.Field( - proto.MESSAGE, - number=11, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=12, - message=timestamp_pb2.Timestamp, - ) - schema_title = proto.Field( - proto.STRING, - number=13, - ) - schema_version = proto.Field( - proto.STRING, - number=14, - ) - metadata = proto.Field( - proto.MESSAGE, - number=15, - message=struct_pb2.Struct, - ) - description = proto.Field( - proto.STRING, - number=16, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation.py 
b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation.py deleted file mode 100644 index c5b2849040..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation.py +++ /dev/null @@ -1,760 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import explanation_metadata -from google.cloud.aiplatform_v1beta1.types import io -from google.protobuf import struct_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'Explanation', - 'ModelExplanation', - 'Attribution', - 'ExplanationSpec', - 'ExplanationParameters', - 'SampledShapleyAttribution', - 'IntegratedGradientsAttribution', - 'XraiAttribution', - 'SmoothGradConfig', - 'FeatureNoiseSigma', - 'BlurBaselineConfig', - 'Similarity', - 'ExplanationSpecOverride', - 'ExplanationMetadataOverride', - }, -) - - -class Explanation(proto.Message): - r"""Explanation of a prediction (provided in - [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions]) - produced by the Model on a given - [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]. - - Attributes: - attributions (Sequence[google.cloud.aiplatform_v1beta1.types.Attribution]): - Output only. Feature attributions grouped by predicted - outputs. 
- - For Models that predict only one output, such as regression - Models that predict only one score, there is only one - attibution that explains the predicted output. For Models - that predict multiple outputs, such as multiclass Models - that predict multiple classes, each element explains one - specific item. - [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] - can be used to identify which output this attribution is - explaining. - - If users set - [ExplanationParameters.top_k][google.cloud.aiplatform.v1beta1.ExplanationParameters.top_k], - the attributions are sorted by - [instance_output_value][Attributions.instance_output_value] - in descending order. If - [ExplanationParameters.output_indices][google.cloud.aiplatform.v1beta1.ExplanationParameters.output_indices] - is specified, the attributions are stored by - [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] - in the same order as they appear in the output_indices. - """ - - attributions = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='Attribution', - ) - - -class ModelExplanation(proto.Message): - r"""Aggregated explanation metrics for a Model over a set of - instances. - - Attributes: - mean_attributions (Sequence[google.cloud.aiplatform_v1beta1.types.Attribution]): - Output only. Aggregated attributions explaining the Model's - prediction outputs over the set of instances. The - attributions are grouped by outputs. - - For Models that predict only one output, such as regression - Models that predict only one score, there is only one - attibution that explains the predicted output. For Models - that predict multiple outputs, such as multiclass Models - that predict multiple classes, each element explains one - specific item. - [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] - can be used to identify which output this attribution is - explaining. 
- - The - [baselineOutputValue][google.cloud.aiplatform.v1beta1.Attribution.baseline_output_value], - [instanceOutputValue][google.cloud.aiplatform.v1beta1.Attribution.instance_output_value] - and - [featureAttributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions] - fields are averaged over the test data. - - NOTE: Currently AutoML tabular classification Models produce - only one attribution, which averages attributions over all - the classes it predicts. - [Attribution.approximation_error][google.cloud.aiplatform.v1beta1.Attribution.approximation_error] - is not populated. - """ - - mean_attributions = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='Attribution', - ) - - -class Attribution(proto.Message): - r"""Attribution that explains a particular prediction output. - - Attributes: - baseline_output_value (float): - Output only. Model predicted output if the input instance is - constructed from the baselines of all the features defined - in - [ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs]. - The field name of the output is determined by the key in - [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs]. - - If the Model's predicted output has multiple dimensions - (rank > 1), this is the value in the output located by - [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]. - - If there are multiple baselines, their output values are - averaged. - instance_output_value (float): - Output only. Model predicted output on the corresponding - [explanation instance][ExplainRequest.instances]. The field - name of the output is determined by the key in - [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs]. - - If the Model predicted output has multiple dimensions, this - is the value in the output located by - [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]. 
- feature_attributions (google.protobuf.struct_pb2.Value): - Output only. Attributions of each explained feature. - Features are extracted from the [prediction - instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] - according to [explanation metadata for - inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs]. - - The value is a struct, whose keys are the name of the - feature. The values are how much the feature in the - [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] - contributed to the predicted result. - - The format of the value is determined by the feature's input - format: - - - If the feature is a scalar value, the attribution value - is a [floating - number][google.protobuf.Value.number_value]. - - - If the feature is an array of scalar values, the - attribution value is an - [array][google.protobuf.Value.list_value]. - - - If the feature is a struct, the attribution value is a - [struct][google.protobuf.Value.struct_value]. The keys in - the attribution value struct are the same as the keys in - the feature struct. The formats of the values in the - attribution struct are determined by the formats of the - values in the feature struct. - - The - [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1beta1.ExplanationMetadata.feature_attributions_schema_uri] - field, pointed to by the - [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec] - field of the - [Endpoint.deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] - object, points to the schema file that describes the - features and their attribution values (if it is populated). - output_index (Sequence[int]): - Output only. The index that locates the explained prediction - output. - - If the prediction output is a scalar value, output_index is - not populated. 
If the prediction output has multiple - dimensions, the length of the output_index list is the same - as the number of dimensions of the output. The i-th element - in output_index is the element index of the i-th dimension - of the output vector. Indices start from 0. - output_display_name (str): - Output only. The display name of the output identified by - [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]. - For example, the predicted class name by a - multi-classification Model. - - This field is only populated iff the Model predicts display - names as a separate field along with the explained output. - The predicted display name must has the same shape of the - explained output, and can be located using output_index. - approximation_error (float): - Output only. Error of - [feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions] - caused by approximation used in the explanation method. - Lower value means more precise attributions. - - - For Sampled Shapley - [attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.sampled_shapley_attribution], - increasing - [path_count][google.cloud.aiplatform.v1beta1.SampledShapleyAttribution.path_count] - might reduce the error. - - For Integrated Gradients - [attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution], - increasing - [step_count][google.cloud.aiplatform.v1beta1.IntegratedGradientsAttribution.step_count] - might reduce the error. - - For [XRAI - attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution], - increasing - [step_count][google.cloud.aiplatform.v1beta1.XraiAttribution.step_count] - might reduce the error. - - See `this - introduction `__ - for more information. - output_name (str): - Output only. Name of the explain output. Specified as the - key in - [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs]. 
- """ - - baseline_output_value = proto.Field( - proto.DOUBLE, - number=1, - ) - instance_output_value = proto.Field( - proto.DOUBLE, - number=2, - ) - feature_attributions = proto.Field( - proto.MESSAGE, - number=3, - message=struct_pb2.Value, - ) - output_index = proto.RepeatedField( - proto.INT32, - number=4, - ) - output_display_name = proto.Field( - proto.STRING, - number=5, - ) - approximation_error = proto.Field( - proto.DOUBLE, - number=6, - ) - output_name = proto.Field( - proto.STRING, - number=7, - ) - - -class ExplanationSpec(proto.Message): - r"""Specification of Model explanation. - - Attributes: - parameters (google.cloud.aiplatform_v1beta1.types.ExplanationParameters): - Required. Parameters that configure - explaining of the Model's predictions. - metadata (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata): - Required. Metadata describing the Model's - input and output for explanation. - """ - - parameters = proto.Field( - proto.MESSAGE, - number=1, - message='ExplanationParameters', - ) - metadata = proto.Field( - proto.MESSAGE, - number=2, - message=explanation_metadata.ExplanationMetadata, - ) - - -class ExplanationParameters(proto.Message): - r"""Parameters to configure explaining for Model's predictions. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - sampled_shapley_attribution (google.cloud.aiplatform_v1beta1.types.SampledShapleyAttribution): - An attribution method that approximates - Shapley values for features that contribute to - the label being predicted. A sampling strategy - is used to approximate the value rather than - considering all subsets of features. Refer to - this paper for model details: - https://arxiv.org/abs/1306.4265. 
- - This field is a member of `oneof`_ ``method``. - integrated_gradients_attribution (google.cloud.aiplatform_v1beta1.types.IntegratedGradientsAttribution): - An attribution method that computes Aumann- - hapley values taking advantage of the model's - fully differentiable structure. Refer to this - paper for more details: - https://arxiv.org/abs/1703.01365 - - This field is a member of `oneof`_ ``method``. - xrai_attribution (google.cloud.aiplatform_v1beta1.types.XraiAttribution): - An attribution method that redistributes - Integrated Gradients attribution to segmented - regions, taking advantage of the model's fully - differentiable structure. Refer to this paper - for more details: - https://arxiv.org/abs/1906.02825 - XRAI currently performs better on natural - images, like a picture of a house or an animal. - If the images are taken in artificial - environments, like a lab or manufacturing line, - or from diagnostic equipment, like x-rays or - quality-control cameras, use Integrated - Gradients instead. - - This field is a member of `oneof`_ ``method``. - similarity (google.cloud.aiplatform_v1beta1.types.Similarity): - Similarity explainability that returns the - nearest neighbors from the provided dataset. - - This field is a member of `oneof`_ ``method``. - top_k (int): - If populated, returns attributions for top K - indices of outputs (defaults to 1). Only applies - to Models that predicts more than one outputs - (e,g, multi-class Models). When set to -1, - returns explanations for all outputs. - output_indices (google.protobuf.struct_pb2.ListValue): - If populated, only returns attributions that have - [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] - contained in output_indices. It must be an ndarray of - integers, with the same shape of the output it's explaining. - - If not populated, returns attributions for - [top_k][google.cloud.aiplatform.v1beta1.ExplanationParameters.top_k] - indices of outputs. 
If neither top_k nor output_indeices is - populated, returns the argmax index of the outputs. - - Only applicable to Models that predict multiple outputs - (e,g, multi-class Models that predict multiple classes). - """ - - sampled_shapley_attribution = proto.Field( - proto.MESSAGE, - number=1, - oneof='method', - message='SampledShapleyAttribution', - ) - integrated_gradients_attribution = proto.Field( - proto.MESSAGE, - number=2, - oneof='method', - message='IntegratedGradientsAttribution', - ) - xrai_attribution = proto.Field( - proto.MESSAGE, - number=3, - oneof='method', - message='XraiAttribution', - ) - similarity = proto.Field( - proto.MESSAGE, - number=7, - oneof='method', - message='Similarity', - ) - top_k = proto.Field( - proto.INT32, - number=4, - ) - output_indices = proto.Field( - proto.MESSAGE, - number=5, - message=struct_pb2.ListValue, - ) - - -class SampledShapleyAttribution(proto.Message): - r"""An attribution method that approximates Shapley values for - features that contribute to the label being predicted. A - sampling strategy is used to approximate the value rather than - considering all subsets of features. - - Attributes: - path_count (int): - Required. The number of feature permutations to consider - when approximating the Shapley values. - - Valid range of its value is [1, 50], inclusively. - """ - - path_count = proto.Field( - proto.INT32, - number=1, - ) - - -class IntegratedGradientsAttribution(proto.Message): - r"""An attribution method that computes the Aumann-Shapley value - taking advantage of the model's fully differentiable structure. - Refer to this paper for more details: - https://arxiv.org/abs/1703.01365 - - Attributes: - step_count (int): - Required. The number of steps for approximating the path - integral. A good value to start is 50 and gradually increase - until the sum to diff property is within the desired error - range. - - Valid range of its value is [1, 100], inclusively. 
- smooth_grad_config (google.cloud.aiplatform_v1beta1.types.SmoothGradConfig): - Config for SmoothGrad approximation of - gradients. - When enabled, the gradients are approximated by - averaging the gradients from noisy samples in - the vicinity of the inputs. Adding noise can - help improve the computed gradients. Refer to - this paper for more details: - https://arxiv.org/pdf/1706.03825.pdf - blur_baseline_config (google.cloud.aiplatform_v1beta1.types.BlurBaselineConfig): - Config for IG with blur baseline. - When enabled, a linear path from the maximally - blurred image to the input image is created. - Using a blurred baseline instead of zero (black - image) is motivated by the BlurIG approach - explained here: https://arxiv.org/abs/2004.03383 - """ - - step_count = proto.Field( - proto.INT32, - number=1, - ) - smooth_grad_config = proto.Field( - proto.MESSAGE, - number=2, - message='SmoothGradConfig', - ) - blur_baseline_config = proto.Field( - proto.MESSAGE, - number=3, - message='BlurBaselineConfig', - ) - - -class XraiAttribution(proto.Message): - r"""An explanation method that redistributes Integrated Gradients - attributions to segmented regions, taking advantage of the - model's fully differentiable structure. Refer to this paper for - more details: https://arxiv.org/abs/1906.02825 - - Supported only by image Models. - - Attributes: - step_count (int): - Required. The number of steps for approximating the path - integral. A good value to start is 50 and gradually increase - until the sum to diff property is met within the desired - error range. - - Valid range of its value is [1, 100], inclusively. - smooth_grad_config (google.cloud.aiplatform_v1beta1.types.SmoothGradConfig): - Config for SmoothGrad approximation of - gradients. - When enabled, the gradients are approximated by - averaging the gradients from noisy samples in - the vicinity of the inputs. Adding noise can - help improve the computed gradients. 
Refer to - this paper for more details: - https://arxiv.org/pdf/1706.03825.pdf - blur_baseline_config (google.cloud.aiplatform_v1beta1.types.BlurBaselineConfig): - Config for XRAI with blur baseline. - When enabled, a linear path from the maximally - blurred image to the input image is created. - Using a blurred baseline instead of zero (black - image) is motivated by the BlurIG approach - explained here: https://arxiv.org/abs/2004.03383 - """ - - step_count = proto.Field( - proto.INT32, - number=1, - ) - smooth_grad_config = proto.Field( - proto.MESSAGE, - number=2, - message='SmoothGradConfig', - ) - blur_baseline_config = proto.Field( - proto.MESSAGE, - number=3, - message='BlurBaselineConfig', - ) - - -class SmoothGradConfig(proto.Message): - r"""Config for SmoothGrad approximation of gradients. - When enabled, the gradients are approximated by averaging the - gradients from noisy samples in the vicinity of the inputs. - Adding noise can help improve the computed gradients. Refer to - this paper for more details: - https://arxiv.org/pdf/1706.03825.pdf - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - noise_sigma (float): - This is a single float value and will be used to add noise - to all the features. Use this field when all features are - normalized to have the same distribution: scale to range [0, - 1], [-1, 1] or z-scoring, where features are normalized to - have 0-mean and 1-variance. Learn more about - `normalization `__. - - For best results the recommended value is about 10% - 20% of - the standard deviation of the input feature. Refer to - section 3.2 of the SmoothGrad paper: - https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. 
- - If the distribution is different per feature, set - [feature_noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.feature_noise_sigma] - instead for each feature. - - This field is a member of `oneof`_ ``GradientNoiseSigma``. - feature_noise_sigma (google.cloud.aiplatform_v1beta1.types.FeatureNoiseSigma): - This is similar to - [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma], - but provides additional flexibility. A separate noise sigma - can be provided for each feature, which is useful if their - distributions are different. No noise is added to features - that are not set. If this field is unset, - [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma] - will be used for all features. - - This field is a member of `oneof`_ ``GradientNoiseSigma``. - noisy_sample_count (int): - The number of gradient samples to use for approximation. The - higher this number, the more accurate the gradient is, but - the runtime complexity increases by this factor as well. - Valid range of its value is [1, 50]. Defaults to 3. - """ - - noise_sigma = proto.Field( - proto.FLOAT, - number=1, - oneof='GradientNoiseSigma', - ) - feature_noise_sigma = proto.Field( - proto.MESSAGE, - number=2, - oneof='GradientNoiseSigma', - message='FeatureNoiseSigma', - ) - noisy_sample_count = proto.Field( - proto.INT32, - number=3, - ) - - -class FeatureNoiseSigma(proto.Message): - r"""Noise sigma by features. Noise sigma represents the standard - deviation of the gaussian kernel that will be used to add noise - to interpolated inputs prior to computing gradients. - - Attributes: - noise_sigma (Sequence[google.cloud.aiplatform_v1beta1.types.FeatureNoiseSigma.NoiseSigmaForFeature]): - Noise sigma per feature. No noise is added to - features that are not set. - """ - - class NoiseSigmaForFeature(proto.Message): - r"""Noise sigma for a single feature. 
- - Attributes: - name (str): - The name of the input feature for which noise sigma is - provided. The features are defined in [explanation metadata - inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs]. - sigma (float): - This represents the standard deviation of the Gaussian - kernel that will be used to add noise to the feature prior - to computing gradients. Similar to - [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma] - but represents the noise added to the current feature. - Defaults to 0.1. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - sigma = proto.Field( - proto.FLOAT, - number=2, - ) - - noise_sigma = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=NoiseSigmaForFeature, - ) - - -class BlurBaselineConfig(proto.Message): - r"""Config for blur baseline. - When enabled, a linear path from the maximally blurred image to - the input image is created. Using a blurred baseline instead of - zero (black image) is motivated by the BlurIG approach explained - here: - https://arxiv.org/abs/2004.03383 - - Attributes: - max_blur_sigma (float): - The standard deviation of the blur kernel for - the blurred baseline. The same blurring - parameter is used for both the height and the - width dimension. If not set, the method defaults - to the zero (i.e. black for images) baseline. - """ - - max_blur_sigma = proto.Field( - proto.FLOAT, - number=1, - ) - - -class Similarity(proto.Message): - r"""Similarity explainability that returns the nearest neighbors - from the provided dataset. - - Attributes: - gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): - The Cloud Storage location for the input - instances. - nearest_neighbor_search_config (google.protobuf.struct_pb2.Value): - The configuration for the generated index, the semantics are - the same as - [metadata][google.cloud.aiplatform.v1beta1.Index.metadata] - and should match NearestNeighborSearchConfig. 
- """ - - gcs_source = proto.Field( - proto.MESSAGE, - number=1, - message=io.GcsSource, - ) - nearest_neighbor_search_config = proto.Field( - proto.MESSAGE, - number=2, - message=struct_pb2.Value, - ) - - -class ExplanationSpecOverride(proto.Message): - r"""The - [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec] - entries that can be overridden at [online - explanation][google.cloud.aiplatform.v1beta1.PredictionService.Explain] - time. - - Attributes: - parameters (google.cloud.aiplatform_v1beta1.types.ExplanationParameters): - The parameters to be overridden. Note that the - [method][google.cloud.aiplatform.v1beta1.ExplanationParameters.method] - cannot be changed. If not specified, no parameter is - overridden. - metadata (google.cloud.aiplatform_v1beta1.types.ExplanationMetadataOverride): - The metadata to be overridden. If not - specified, no metadata is overridden. - """ - - parameters = proto.Field( - proto.MESSAGE, - number=1, - message='ExplanationParameters', - ) - metadata = proto.Field( - proto.MESSAGE, - number=2, - message='ExplanationMetadataOverride', - ) - - -class ExplanationMetadataOverride(proto.Message): - r"""The - [ExplanationMetadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata] - entries that can be overridden at [online - explanation][google.cloud.aiplatform.v1beta1.PredictionService.Explain] - time. - - Attributes: - inputs (Sequence[google.cloud.aiplatform_v1beta1.types.ExplanationMetadataOverride.InputsEntry]): - Required. Overrides the [input - metadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs] - of the features. The key is the name of the feature to be - overridden. The keys specified here must exist in the input - metadata to be overridden. If a feature is not specified - here, the corresponding feature's input metadata is not - overridden. 
- """ - - class InputMetadataOverride(proto.Message): - r"""The [input - metadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata] - entries to be overridden. - - Attributes: - input_baselines (Sequence[google.protobuf.struct_pb2.Value]): - Baseline inputs for this feature. - - This overrides the ``input_baseline`` field of the - [ExplanationMetadata.InputMetadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata] - object of the corresponding feature's input metadata. If - it's not specified, the original baselines are not - overridden. - """ - - input_baselines = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=struct_pb2.Value, - ) - - inputs = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=1, - message=InputMetadataOverride, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py deleted file mode 100644 index 7e5ca85912..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/explanation_metadata.py +++ /dev/null @@ -1,460 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.protobuf import struct_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'ExplanationMetadata', - }, -) - - -class ExplanationMetadata(proto.Message): - r"""Metadata describing the Model's input and output for - explanation. - - Attributes: - inputs (Sequence[google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputsEntry]): - Required. Map from feature names to feature input metadata. - Keys are the name of the features. Values are the - specification of the feature. - - An empty InputMetadata is valid. It describes a text feature - which has the name specified as the key in - [ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs]. - The baseline of the empty feature is chosen by Vertex AI. - - For Vertex AI-provided Tensorflow images, the key can be any - friendly name of the feature. Once specified, - [featureAttributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions] - are keyed by this key (if not grouped with another feature). - - For custom images, the key must match with the key in - [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]. - outputs (Sequence[google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.OutputsEntry]): - Required. Map from output names to output - metadata. - For Vertex AI-provided Tensorflow images, keys - can be any user defined string that consists of - any UTF-8 characters. - For custom images, keys are the name of the - output field in the prediction to be explained. - - Currently only one key is allowed. - feature_attributions_schema_uri (str): - Points to a YAML file stored on Google Cloud Storage - describing the format of the [feature - attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions]. - The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. 
- AutoML tabular Models always have this field populated by - Vertex AI. Note: The URI given on output may be different, - including the URI scheme, than the one given on input. The - output URI will point to a location where the user only has - a read access. - """ - - class InputMetadata(proto.Message): - r"""Metadata of the input of a feature. - - Fields other than - [InputMetadata.input_baselines][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.input_baselines] - are applicable only for Models that are using Vertex AI-provided - images for Tensorflow. - - Attributes: - input_baselines (Sequence[google.protobuf.struct_pb2.Value]): - Baseline inputs for this feature. - - If no baseline is specified, Vertex AI chooses the baseline - for this feature. If multiple baselines are specified, - Vertex AI returns the average attributions across them in - [Attribution.feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions]. - - For Vertex AI-provided Tensorflow images (both 1.x and 2.x), - the shape of each baseline must match the shape of the input - tensor. If a scalar is provided, we broadcast to the same - shape as the input tensor. - - For custom images, the element of the baselines must be in - the same format as the feature's input in the - [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances][]. - The schema of any single instance may be specified via - Endpoint's DeployedModels' - [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. - input_tensor_name (str): - Name of the input tensor for this feature. - Required and is only applicable to Vertex AI- - provided images for Tensorflow. 
- encoding (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Encoding): - Defines how the feature is encoded into the - input tensor. Defaults to IDENTITY. - modality (str): - Modality of the feature. Valid values are: - numeric, image. Defaults to numeric. - feature_value_domain (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.FeatureValueDomain): - The domain details of the input feature - value. Like min/max, original mean or standard - deviation if normalized. - indices_tensor_name (str): - Specifies the index of the values of the input tensor. - Required when the input tensor is a sparse representation. - Refer to Tensorflow documentation for more details: - https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor. - dense_shape_tensor_name (str): - Specifies the shape of the values of the input if the input - is a sparse representation. Refer to Tensorflow - documentation for more details: - https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor. - index_feature_mapping (Sequence[str]): - A list of feature names for each index in the input tensor. - Required when the input - [InputMetadata.encoding][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.encoding] - is BAG_OF_FEATURES, BAG_OF_FEATURES_SPARSE, INDICATOR. - encoded_tensor_name (str): - Encoded tensor is a transformation of the input tensor. Must - be provided if choosing [Integrated Gradients - attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution] - or [XRAI - attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution] - and the input tensor is not differentiable. - - An encoded tensor is generated if the input tensor is - encoded by a lookup table. - encoded_baselines (Sequence[google.protobuf.struct_pb2.Value]): - A list of baselines for the encoded tensor. - The shape of each baseline should match the - shape of the encoded tensor. 
If a scalar is - provided, Vertex AI broadcasts to the same shape - as the encoded tensor. - visualization (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization): - Visualization configurations for image - explanation. - group_name (str): - Name of the group that the input belongs to. Features with - the same group name will be treated as one feature when - computing attributions. Features grouped together can have - different shapes in value. If provided, there will be one - single attribution generated in - [Attribution.feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions], - keyed by the group name. - """ - class Encoding(proto.Enum): - r"""Defines how a feature is encoded. Defaults to IDENTITY.""" - ENCODING_UNSPECIFIED = 0 - IDENTITY = 1 - BAG_OF_FEATURES = 2 - BAG_OF_FEATURES_SPARSE = 3 - INDICATOR = 4 - COMBINED_EMBEDDING = 5 - CONCAT_EMBEDDING = 6 - - class FeatureValueDomain(proto.Message): - r"""Domain details of the input feature value. Provides numeric - information about the feature, such as its range (min, max). If the - feature has been pre-processed, for example with z-scoring, then it - provides information about how to recover the original feature. For - example, if the input feature is an image and it has been - pre-processed to obtain 0-mean and stddev = 1 values, then - original_mean, and original_stddev refer to the mean and stddev of - the original feature (e.g. image tensor) from which input feature - (with mean = 0 and stddev = 1) was obtained. - - Attributes: - min_value (float): - The minimum permissible value for this - feature. - max_value (float): - The maximum permissible value for this - feature. - original_mean (float): - If this input feature has been normalized to a mean value of - 0, the original_mean specifies the mean value of the domain - prior to normalization. 
- original_stddev (float): - If this input feature has been normalized to a standard - deviation of 1.0, the original_stddev specifies the standard - deviation of the domain prior to normalization. - """ - - min_value = proto.Field( - proto.FLOAT, - number=1, - ) - max_value = proto.Field( - proto.FLOAT, - number=2, - ) - original_mean = proto.Field( - proto.FLOAT, - number=3, - ) - original_stddev = proto.Field( - proto.FLOAT, - number=4, - ) - - class Visualization(proto.Message): - r"""Visualization configurations for image explanation. - - Attributes: - type_ (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization.Type): - Type of the image visualization. Only applicable to - [Integrated Gradients - attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution]. - OUTLINES shows regions of attribution, while PIXELS shows - per-pixel attribution. Defaults to OUTLINES. - polarity (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization.Polarity): - Whether to only highlight pixels with - positive contributions, negative or both. - Defaults to POSITIVE. - color_map (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization.ColorMap): - The color scheme used for the highlighted areas. - - Defaults to PINK_GREEN for [Integrated Gradients - attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution], - which shows positive attributions in green and negative in - pink. - - Defaults to VIRIDIS for [XRAI - attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution], - which highlights the most influential regions in yellow and - the least influential in blue. - clip_percent_upperbound (float): - Excludes attributions above the specified percentile from - the highlighted areas. 
Using the clip_percent_upperbound and - clip_percent_lowerbound together can be useful for filtering - out noise and making it easier to see areas of strong - attribution. Defaults to 99.9. - clip_percent_lowerbound (float): - Excludes attributions below the specified - percentile, from the highlighted areas. Defaults - to 62. - overlay_type (google.cloud.aiplatform_v1beta1.types.ExplanationMetadata.InputMetadata.Visualization.OverlayType): - How the original image is displayed in the - visualization. Adjusting the overlay can help - increase visual clarity if the original image - makes it difficult to view the visualization. - Defaults to NONE. - """ - class Type(proto.Enum): - r"""Type of the image visualization. Only applicable to [Integrated - Gradients - attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution]. - """ - TYPE_UNSPECIFIED = 0 - PIXELS = 1 - OUTLINES = 2 - - class Polarity(proto.Enum): - r"""Whether to only highlight pixels with positive contributions, - negative or both. Defaults to POSITIVE. 
- """ - POLARITY_UNSPECIFIED = 0 - POSITIVE = 1 - NEGATIVE = 2 - BOTH = 3 - - class ColorMap(proto.Enum): - r"""The color scheme used for highlighting areas.""" - COLOR_MAP_UNSPECIFIED = 0 - PINK_GREEN = 1 - VIRIDIS = 2 - RED = 3 - GREEN = 4 - RED_GREEN = 6 - PINK_WHITE_GREEN = 5 - - class OverlayType(proto.Enum): - r"""How the original image is displayed in the visualization.""" - OVERLAY_TYPE_UNSPECIFIED = 0 - NONE = 1 - ORIGINAL = 2 - GRAYSCALE = 3 - MASK_BLACK = 4 - - type_ = proto.Field( - proto.ENUM, - number=1, - enum='ExplanationMetadata.InputMetadata.Visualization.Type', - ) - polarity = proto.Field( - proto.ENUM, - number=2, - enum='ExplanationMetadata.InputMetadata.Visualization.Polarity', - ) - color_map = proto.Field( - proto.ENUM, - number=3, - enum='ExplanationMetadata.InputMetadata.Visualization.ColorMap', - ) - clip_percent_upperbound = proto.Field( - proto.FLOAT, - number=4, - ) - clip_percent_lowerbound = proto.Field( - proto.FLOAT, - number=5, - ) - overlay_type = proto.Field( - proto.ENUM, - number=6, - enum='ExplanationMetadata.InputMetadata.Visualization.OverlayType', - ) - - input_baselines = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=struct_pb2.Value, - ) - input_tensor_name = proto.Field( - proto.STRING, - number=2, - ) - encoding = proto.Field( - proto.ENUM, - number=3, - enum='ExplanationMetadata.InputMetadata.Encoding', - ) - modality = proto.Field( - proto.STRING, - number=4, - ) - feature_value_domain = proto.Field( - proto.MESSAGE, - number=5, - message='ExplanationMetadata.InputMetadata.FeatureValueDomain', - ) - indices_tensor_name = proto.Field( - proto.STRING, - number=6, - ) - dense_shape_tensor_name = proto.Field( - proto.STRING, - number=7, - ) - index_feature_mapping = proto.RepeatedField( - proto.STRING, - number=8, - ) - encoded_tensor_name = proto.Field( - proto.STRING, - number=9, - ) - encoded_baselines = proto.RepeatedField( - proto.MESSAGE, - number=10, - message=struct_pb2.Value, - ) - visualization = 
proto.Field( - proto.MESSAGE, - number=11, - message='ExplanationMetadata.InputMetadata.Visualization', - ) - group_name = proto.Field( - proto.STRING, - number=12, - ) - - class OutputMetadata(proto.Message): - r"""Metadata of the prediction output to be explained. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - index_display_name_mapping (google.protobuf.struct_pb2.Value): - Static mapping between the index and display name. - - Use this if the outputs are a deterministic n-dimensional - array, e.g. a list of scores of all the classes in a - pre-defined order for a multi-classification Model. It's not - feasible if the outputs are non-deterministic, e.g. the - Model produces top-k classes or sort the outputs by their - values. - - The shape of the value must be an n-dimensional array of - strings. The number of dimensions must match that of the - outputs to be explained. The - [Attribution.output_display_name][google.cloud.aiplatform.v1beta1.Attribution.output_display_name] - is populated by locating in the mapping with - [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]. - - This field is a member of `oneof`_ ``display_name_mapping``. - display_name_mapping_key (str): - Specify a field name in the prediction to look for the - display name. - - Use this if the prediction contains the display names for - the outputs. - - The display names in the prediction must have the same shape - of the outputs, so that it can be located by - [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] - for a specific output. - - This field is a member of `oneof`_ ``display_name_mapping``. 
- output_tensor_name (str): - Name of the output tensor. Required and is - only applicable to Vertex AI provided images for - Tensorflow. - """ - - index_display_name_mapping = proto.Field( - proto.MESSAGE, - number=1, - oneof='display_name_mapping', - message=struct_pb2.Value, - ) - display_name_mapping_key = proto.Field( - proto.STRING, - number=2, - oneof='display_name_mapping', - ) - output_tensor_name = proto.Field( - proto.STRING, - number=3, - ) - - inputs = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=1, - message=InputMetadata, - ) - outputs = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=2, - message=OutputMetadata, - ) - feature_attributions_schema_uri = proto.Field( - proto.STRING, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature.py deleted file mode 100644 index 46bcb9022e..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature.py +++ /dev/null @@ -1,153 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats -from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'Feature', - }, -) - - -class Feature(proto.Message): - r"""Feature Metadata information that describes an attribute of - an entity type. For example, apple is an entity type, and color - is a feature that describes apple. - - Attributes: - name (str): - Immutable. Name of the Feature. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` - - The last part feature is assigned by the client. The feature - can be up to 64 characters long and can consist only of - ASCII Latin letters A-Z and a-z, underscore(_), and ASCII - digits 0-9 starting with a letter. The value will be unique - given an entity type. - description (str): - Description of the Feature. - value_type (google.cloud.aiplatform_v1beta1.types.Feature.ValueType): - Required. Immutable. Type of Feature value. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this EntityType - was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this EntityType - was most recently updated. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Feature.LabelsEntry]): - Optional. The labels with user-defined - metadata to organize your Features. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - on and examples of labels. No more than 64 user - labels can be associated with one Feature - (System labels are excluded)." 
- System reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. - etag (str): - Used to perform a consistent read-modify- - rite updates. If not set, a blind "overwrite" - update happens. - monitoring_config (google.cloud.aiplatform_v1beta1.types.FeaturestoreMonitoringConfig): - Optional. The custom monitoring configuration for this - Feature, if not set, use the monitoring_config defined for - the EntityType this Feature belongs to. Only Features with - type - ([Feature.ValueType][google.cloud.aiplatform.v1beta1.Feature.ValueType]) - BOOL, STRING, DOUBLE or INT64 can enable monitoring. - - If this is populated with - [FeaturestoreMonitoringConfig.disabled][] = true, snapshot - analysis monitoring is disabled; if - [FeaturestoreMonitoringConfig.monitoring_interval][] - specified, snapshot analysis monitoring is enabled. - Otherwise, snapshot analysis monitoring config is same as - the EntityType's this Feature belongs to. - monitoring_stats (Sequence[google.cloud.aiplatform_v1beta1.types.FeatureStatsAnomaly]): - Output only. A list of historical [Snapshot - Analysis][FeaturestoreMonitoringConfig.SnapshotAnalysis] - stats requested by user, sorted by - [FeatureStatsAnomaly.start_time][google.cloud.aiplatform.v1beta1.FeatureStatsAnomaly.start_time] - descending. 
- """ - class ValueType(proto.Enum): - r"""An enum representing the value type of a feature.""" - VALUE_TYPE_UNSPECIFIED = 0 - BOOL = 1 - BOOL_ARRAY = 2 - DOUBLE = 3 - DOUBLE_ARRAY = 4 - INT64 = 9 - INT64_ARRAY = 10 - STRING = 11 - STRING_ARRAY = 12 - BYTES = 13 - - name = proto.Field( - proto.STRING, - number=1, - ) - description = proto.Field( - proto.STRING, - number=2, - ) - value_type = proto.Field( - proto.ENUM, - number=3, - enum=ValueType, - ) - create_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=6, - ) - etag = proto.Field( - proto.STRING, - number=7, - ) - monitoring_config = proto.Field( - proto.MESSAGE, - number=9, - message=featurestore_monitoring.FeaturestoreMonitoringConfig, - ) - monitoring_stats = proto.RepeatedField( - proto.MESSAGE, - number=10, - message=feature_monitoring_stats.FeatureStatsAnomaly, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_monitoring_stats.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_monitoring_stats.py deleted file mode 100644 index 48dd2b3b84..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_monitoring_stats.py +++ /dev/null @@ -1,124 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'FeatureStatsAnomaly', - }, -) - - -class FeatureStatsAnomaly(proto.Message): - r"""Stats and Anomaly generated at specific timestamp for specific - Feature. The start_time and end_time are used to define the time - range of the dataset that current stats belongs to, e.g. prediction - traffic is bucketed into prediction datasets by time window. If the - Dataset is not defined by time window, start_time = end_time. - Timestamp of the stats and anomalies always refers to end_time. Raw - stats and anomalies are stored in stats_uri or anomaly_uri in the - tensorflow defined protos. Field data_stats contains almost - identical information with the raw stats in Vertex AI defined proto, - for UI to display. - - Attributes: - score (float): - Feature importance score, only populated when cross-feature - monitoring is enabled. For now only used to represent - feature attribution score within range [0, 1] for - [ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_SKEW][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_SKEW] - and - [ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_DRIFT][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_DRIFT]. - stats_uri (str): - Path of the stats file for current feature values in Cloud - Storage bucket. Format: - gs:////stats. Example: - gs://monitoring_bucket/feature_name/stats. Stats are stored - as binary format with Protobuf message - `tensorflow.metadata.v0.FeatureNameStatistics `__. - anomaly_uri (str): - Path of the anomaly file for current feature values in Cloud - Storage bucket. Format: - gs:////anomalies. 
Example: - gs://monitoring_bucket/feature_name/anomalies. Stats are - stored as binary format with Protobuf message Anoamlies are - stored as binary format with Protobuf message - [tensorflow.metadata.v0.AnomalyInfo] - (https://github.com/tensorflow/metadata/blob/master/tensorflow_metadata/proto/v0/anomalies.proto). - distribution_deviation (float): - Deviation from the current stats to baseline - stats. 1. For categorical feature, the - distribution distance is calculated by - L-inifinity norm. - 2. For numerical feature, the distribution - distance is calculated by Jensen–Shannon - divergence. - anomaly_detection_threshold (float): - This is the threshold used when detecting anomalies. The - threshold can be changed by user, so this one might be - different from - [ThresholdConfig.value][google.cloud.aiplatform.v1beta1.ThresholdConfig.value]. - start_time (google.protobuf.timestamp_pb2.Timestamp): - The start timestamp of window where stats were generated. - For objectives where time window doesn't make sense (e.g. - Featurestore Snapshot Monitoring), start_time is only used - to indicate the monitoring intervals, so it always equals to - (end_time - monitoring_interval). - end_time (google.protobuf.timestamp_pb2.Timestamp): - The end timestamp of window where stats were generated. For - objectives where time window doesn't make sense (e.g. - Featurestore Snapshot Monitoring), end_time indicates the - timestamp of the data used to generate stats (e.g. timestamp - we take snapshots for feature values). 
- """ - - score = proto.Field( - proto.DOUBLE, - number=1, - ) - stats_uri = proto.Field( - proto.STRING, - number=3, - ) - anomaly_uri = proto.Field( - proto.STRING, - number=4, - ) - distribution_deviation = proto.Field( - proto.DOUBLE, - number=5, - ) - anomaly_detection_threshold = proto.Field( - proto.DOUBLE, - number=9, - ) - start_time = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=8, - message=timestamp_pb2.Timestamp, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_selector.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_selector.py deleted file mode 100644 index 8bbcc21703..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/feature_selector.py +++ /dev/null @@ -1,62 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'IdMatcher', - 'FeatureSelector', - }, -) - - -class IdMatcher(proto.Message): - r"""Matcher for Features of an EntityType by Feature ID. - - Attributes: - ids (Sequence[str]): - Required. 
The following are accepted as ``ids``: - - - A single-element list containing only ``*``, which - selects all Features in the target EntityType, or - - A list containing only Feature IDs, which selects only - Features with those IDs in the target EntityType. - """ - - ids = proto.RepeatedField( - proto.STRING, - number=1, - ) - - -class FeatureSelector(proto.Message): - r"""Selector for Features of an EntityType. - - Attributes: - id_matcher (google.cloud.aiplatform_v1beta1.types.IdMatcher): - Required. Matches Features based on ID. - """ - - id_matcher = proto.Field( - proto.MESSAGE, - number=1, - message='IdMatcher', - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore.py deleted file mode 100644 index b5810333aa..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore.py +++ /dev/null @@ -1,137 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'Featurestore', - }, -) - - -class Featurestore(proto.Message): - r"""Vertex AI Feature Store provides a centralized repository for - organizing, storing, and serving ML features. The Featurestore - is a top-level container for your features and their values. - - Attributes: - name (str): - Output only. Name of the Featurestore. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Featurestore - was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Featurestore - was last updated. - etag (str): - Optional. Used to perform consistent read- - odify-write updates. If not set, a blind - "overwrite" update happens. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Featurestore.LabelsEntry]): - Optional. The labels with user-defined - metadata to organize your Featurestore. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - on and examples of labels. No more than 64 user - labels can be associated with one - Featurestore(System labels are excluded)." - System reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. - online_serving_config (google.cloud.aiplatform_v1beta1.types.Featurestore.OnlineServingConfig): - Required. Config for online serving - resources. - state (google.cloud.aiplatform_v1beta1.types.Featurestore.State): - Output only. State of the featurestore. 
- encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): - Optional. Customer-managed encryption key - spec for data storage. If set, both of the - online and offline data storage will be secured - by this key. - """ - class State(proto.Enum): - r"""Possible states a Featurestore can have.""" - STATE_UNSPECIFIED = 0 - STABLE = 1 - UPDATING = 2 - - class OnlineServingConfig(proto.Message): - r"""OnlineServingConfig specifies the details for provisioning - online serving resources. - - Attributes: - fixed_node_count (int): - The number of nodes for each cluster. The - number of nodes will not scale automatically but - can be scaled manually by providing different - values when updating. - """ - - fixed_node_count = proto.Field( - proto.INT32, - number=2, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - create_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - etag = proto.Field( - proto.STRING, - number=5, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=6, - ) - online_serving_config = proto.Field( - proto.MESSAGE, - number=7, - message=OnlineServingConfig, - ) - state = proto.Field( - proto.ENUM, - number=8, - enum=State, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=10, - message=gca_encryption_spec.EncryptionSpec, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_monitoring.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_monitoring.py deleted file mode 100644 index 7dbf6de49b..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_monitoring.py +++ /dev/null @@ -1,94 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# 
you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import duration_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'FeaturestoreMonitoringConfig', - }, -) - - -class FeaturestoreMonitoringConfig(proto.Message): - r"""Configuration of how features in Featurestore are monitored. - - Attributes: - snapshot_analysis (google.cloud.aiplatform_v1beta1.types.FeaturestoreMonitoringConfig.SnapshotAnalysis): - The config for Snapshot Analysis Based - Feature Monitoring. - """ - - class SnapshotAnalysis(proto.Message): - r"""Configuration of the Featurestore's Snapshot Analysis Based - Monitoring. This type of analysis generates statistics for each - Feature based on a snapshot of the latest feature value of each - entities every monitoring_interval. - - Attributes: - disabled (bool): - The monitoring schedule for snapshot analysis. For - EntityType-level config: unset / disabled = true indicates - disabled by default for Features under it; otherwise by - default enable snapshot analysis monitoring with - monitoring_interval for Features under it. Feature-level - config: disabled = true indicates disabled regardless of the - EntityType-level config; unset monitoring_interval indicates - going with EntityType-level config; otherwise run snapshot - analysis monitoring with monitoring_interval regardless of - the EntityType-level config. Explicitly Disable the snapshot - analysis based monitoring. 
- monitoring_interval (google.protobuf.duration_pb2.Duration): - Configuration of the snapshot analysis based - monitoring pipeline running interval. The value - is rolled up to full day. - monitoring_interval_days (int): - Configuration of the snapshot analysis based monitoring - pipeline running interval. The value indicates number of - days. If both - [FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval_days][google.cloud.aiplatform.v1beta1.FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval_days] - and - [FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval][google.cloud.aiplatform.v1beta1.FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval] - are set when creating/updating EntityTypes/Features, - [FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval_days][google.cloud.aiplatform.v1beta1.FeaturestoreMonitoringConfig.SnapshotAnalysis.monitoring_interval_days] - will be used. - """ - - disabled = proto.Field( - proto.BOOL, - number=1, - ) - monitoring_interval = proto.Field( - proto.MESSAGE, - number=2, - message=duration_pb2.Duration, - ) - monitoring_interval_days = proto.Field( - proto.INT32, - number=3, - ) - - snapshot_analysis = proto.Field( - proto.MESSAGE, - number=1, - message=SnapshotAnalysis, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py deleted file mode 100644 index facf970e0d..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_online_service.py +++ /dev/null @@ -1,382 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import feature_selector as gca_feature_selector -from google.cloud.aiplatform_v1beta1.types import types -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'ReadFeatureValuesRequest', - 'ReadFeatureValuesResponse', - 'StreamingReadFeatureValuesRequest', - 'FeatureValue', - 'FeatureValueList', - }, -) - - -class ReadFeatureValuesRequest(proto.Message): - r"""Request message for - [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. - - Attributes: - entity_type (str): - Required. The resource name of the EntityType for the entity - being read. Value format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. - For example, for a machine learning model predicting user - clicks on a website, an EntityType ID could be ``user``. - entity_id (str): - Required. ID for a specific entity. For example, for a - machine learning model predicting user clicks on a website, - an entity ID could be ``user_123``. - feature_selector (google.cloud.aiplatform_v1beta1.types.FeatureSelector): - Required. Selector choosing Features of the - target EntityType. 
- """ - - entity_type = proto.Field( - proto.STRING, - number=1, - ) - entity_id = proto.Field( - proto.STRING, - number=2, - ) - feature_selector = proto.Field( - proto.MESSAGE, - number=3, - message=gca_feature_selector.FeatureSelector, - ) - - -class ReadFeatureValuesResponse(proto.Message): - r"""Response message for - [FeaturestoreOnlineServingService.ReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues]. - - Attributes: - header (google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse.Header): - Response header. - entity_view (google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse.EntityView): - Entity view with Feature values. This may be - the entity in the Featurestore if values for all - Features were requested, or a projection of the - entity in the Featurestore if values for only - some Features were requested. - """ - - class FeatureDescriptor(proto.Message): - r"""Metadata for requested Features. - - Attributes: - id (str): - Feature ID. - """ - - id = proto.Field( - proto.STRING, - number=1, - ) - - class Header(proto.Message): - r"""Response header with metadata for the requested - [ReadFeatureValuesRequest.entity_type][google.cloud.aiplatform.v1beta1.ReadFeatureValuesRequest.entity_type] - and Features. - - Attributes: - entity_type (str): - The resource name of the EntityType from the - [ReadFeatureValuesRequest][google.cloud.aiplatform.v1beta1.ReadFeatureValuesRequest]. - Value format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. - feature_descriptors (Sequence[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse.FeatureDescriptor]): - List of Feature metadata corresponding to each piece of - [ReadFeatureValuesResponse.data][]. 
- """ - - entity_type = proto.Field( - proto.STRING, - number=1, - ) - feature_descriptors = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='ReadFeatureValuesResponse.FeatureDescriptor', - ) - - class EntityView(proto.Message): - r"""Entity view with Feature values. - - Attributes: - entity_id (str): - ID of the requested entity. - data (Sequence[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse.EntityView.Data]): - Each piece of data holds the k requested values for one - requested Feature. If no values for the requested Feature - exist, the corresponding cell will be empty. This has the - same size and is in the same order as the features from the - header - [ReadFeatureValuesResponse.header][google.cloud.aiplatform.v1beta1.ReadFeatureValuesResponse.header]. - """ - - class Data(proto.Message): - r"""Container to hold value(s), successive in time, for one - Feature from the request. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - value (google.cloud.aiplatform_v1beta1.types.FeatureValue): - Feature value if a single value is requested. - - This field is a member of `oneof`_ ``data``. - values (google.cloud.aiplatform_v1beta1.types.FeatureValueList): - Feature values list if values, successive in - time, are requested. If the requested number of - values is greater than the number of existing - Feature values, nonexistent values are omitted - instead of being returned as empty. - - This field is a member of `oneof`_ ``data``. 
- """ - - value = proto.Field( - proto.MESSAGE, - number=1, - oneof='data', - message='FeatureValue', - ) - values = proto.Field( - proto.MESSAGE, - number=2, - oneof='data', - message='FeatureValueList', - ) - - entity_id = proto.Field( - proto.STRING, - number=1, - ) - data = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='ReadFeatureValuesResponse.EntityView.Data', - ) - - header = proto.Field( - proto.MESSAGE, - number=1, - message=Header, - ) - entity_view = proto.Field( - proto.MESSAGE, - number=2, - message=EntityView, - ) - - -class StreamingReadFeatureValuesRequest(proto.Message): - r"""Request message for - [FeaturestoreOnlineServingService.StreamingFeatureValuesRead][]. - - Attributes: - entity_type (str): - Required. The resource name of the entities' type. Value - format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}``. - For example, for a machine learning model predicting user - clicks on a website, an EntityType ID could be ``user``. - entity_ids (Sequence[str]): - Required. IDs of entities to read Feature values of. The - maximum number of IDs is 100. For example, for a machine - learning model predicting user clicks on a website, an - entity ID could be ``user_123``. - feature_selector (google.cloud.aiplatform_v1beta1.types.FeatureSelector): - Required. Selector choosing Features of the - target EntityType. Feature IDs will be - deduplicated. - """ - - entity_type = proto.Field( - proto.STRING, - number=1, - ) - entity_ids = proto.RepeatedField( - proto.STRING, - number=2, - ) - feature_selector = proto.Field( - proto.MESSAGE, - number=3, - message=gca_feature_selector.FeatureSelector, - ) - - -class FeatureValue(proto.Message): - r"""Value for a feature. - NEXT ID: 15 - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. 
- - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - bool_value (bool): - Bool type feature value. - - This field is a member of `oneof`_ ``value``. - double_value (float): - Double type feature value. - - This field is a member of `oneof`_ ``value``. - int64_value (int): - Int64 feature value. - - This field is a member of `oneof`_ ``value``. - string_value (str): - String feature value. - - This field is a member of `oneof`_ ``value``. - bool_array_value (google.cloud.aiplatform_v1beta1.types.BoolArray): - A list of bool type feature value. - - This field is a member of `oneof`_ ``value``. - double_array_value (google.cloud.aiplatform_v1beta1.types.DoubleArray): - A list of double type feature value. - - This field is a member of `oneof`_ ``value``. - int64_array_value (google.cloud.aiplatform_v1beta1.types.Int64Array): - A list of int64 type feature value. - - This field is a member of `oneof`_ ``value``. - string_array_value (google.cloud.aiplatform_v1beta1.types.StringArray): - A list of string type feature value. - - This field is a member of `oneof`_ ``value``. - bytes_value (bytes): - Bytes feature value. - - This field is a member of `oneof`_ ``value``. - metadata (google.cloud.aiplatform_v1beta1.types.FeatureValue.Metadata): - Metadata of feature value. - """ - - class Metadata(proto.Message): - r"""Metadata of feature value. - - Attributes: - generate_time (google.protobuf.timestamp_pb2.Timestamp): - Feature generation timestamp. Typically, it - is provided by user at feature ingestion time. - If not, feature store will use the system - timestamp when the data is ingested into feature - store. For streaming ingestion, the time, - aligned by days, must be no older than five - years (1825 days) and no later than one year - (366 days) in the future. 
- """ - - generate_time = proto.Field( - proto.MESSAGE, - number=1, - message=timestamp_pb2.Timestamp, - ) - - bool_value = proto.Field( - proto.BOOL, - number=1, - oneof='value', - ) - double_value = proto.Field( - proto.DOUBLE, - number=2, - oneof='value', - ) - int64_value = proto.Field( - proto.INT64, - number=5, - oneof='value', - ) - string_value = proto.Field( - proto.STRING, - number=6, - oneof='value', - ) - bool_array_value = proto.Field( - proto.MESSAGE, - number=7, - oneof='value', - message=types.BoolArray, - ) - double_array_value = proto.Field( - proto.MESSAGE, - number=8, - oneof='value', - message=types.DoubleArray, - ) - int64_array_value = proto.Field( - proto.MESSAGE, - number=11, - oneof='value', - message=types.Int64Array, - ) - string_array_value = proto.Field( - proto.MESSAGE, - number=12, - oneof='value', - message=types.StringArray, - ) - bytes_value = proto.Field( - proto.BYTES, - number=13, - oneof='value', - ) - metadata = proto.Field( - proto.MESSAGE, - number=14, - message=Metadata, - ) - - -class FeatureValueList(proto.Message): - r"""Container for list of values. - - Attributes: - values (Sequence[google.cloud.aiplatform_v1beta1.types.FeatureValue]): - A list of feature values. All of them should - be the same data type. - """ - - values = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='FeatureValue', - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_service.py deleted file mode 100644 index 65ab4380d8..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/featurestore_service.py +++ /dev/null @@ -1,1653 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type -from google.cloud.aiplatform_v1beta1.types import feature as gca_feature -from google.cloud.aiplatform_v1beta1.types import feature_selector as gca_feature_selector -from google.cloud.aiplatform_v1beta1.types import featurestore as gca_featurestore -from google.cloud.aiplatform_v1beta1.types import io -from google.cloud.aiplatform_v1beta1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'CreateFeaturestoreRequest', - 'GetFeaturestoreRequest', - 'ListFeaturestoresRequest', - 'ListFeaturestoresResponse', - 'UpdateFeaturestoreRequest', - 'DeleteFeaturestoreRequest', - 'ImportFeatureValuesRequest', - 'ImportFeatureValuesResponse', - 'BatchReadFeatureValuesRequest', - 'ExportFeatureValuesRequest', - 'DestinationFeatureSetting', - 'FeatureValueDestination', - 'ExportFeatureValuesResponse', - 'BatchReadFeatureValuesResponse', - 'CreateEntityTypeRequest', - 'GetEntityTypeRequest', - 'ListEntityTypesRequest', - 'ListEntityTypesResponse', - 'UpdateEntityTypeRequest', - 'DeleteEntityTypeRequest', - 'CreateFeatureRequest', - 'BatchCreateFeaturesRequest', - 'BatchCreateFeaturesResponse', - 'GetFeatureRequest', - 'ListFeaturesRequest', - 'ListFeaturesResponse', - 'SearchFeaturesRequest', - 'SearchFeaturesResponse', - 'UpdateFeatureRequest', - 'DeleteFeatureRequest', - 
'CreateFeaturestoreOperationMetadata', - 'UpdateFeaturestoreOperationMetadata', - 'ImportFeatureValuesOperationMetadata', - 'ExportFeatureValuesOperationMetadata', - 'BatchReadFeatureValuesOperationMetadata', - 'CreateEntityTypeOperationMetadata', - 'CreateFeatureOperationMetadata', - 'BatchCreateFeaturesOperationMetadata', - }, -) - - -class CreateFeaturestoreRequest(proto.Message): - r"""Request message for - [FeaturestoreService.CreateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeaturestore]. - - Attributes: - parent (str): - Required. The resource name of the Location to create - Featurestores. Format: - ``projects/{project}/locations/{location}'`` - featurestore (google.cloud.aiplatform_v1beta1.types.Featurestore): - Required. The Featurestore to create. - featurestore_id (str): - Required. The ID to use for this Featurestore, which will - become the final component of the Featurestore's resource - name. - - This value may be up to 60 characters, and valid characters - are ``[a-z0-9_]``. The first character cannot be a number. - - The value must be unique within the project and location. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - featurestore = proto.Field( - proto.MESSAGE, - number=2, - message=gca_featurestore.Featurestore, - ) - featurestore_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetFeaturestoreRequest(proto.Message): - r"""Request message for - [FeaturestoreService.GetFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeaturestore]. - - Attributes: - name (str): - Required. The name of the Featurestore - resource. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListFeaturestoresRequest(proto.Message): - r"""Request message for - [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]. - - Attributes: - parent (str): - Required. The resource name of the Location to list - Featurestores. 
Format: - ``projects/{project}/locations/{location}`` - filter (str): - Lists the featurestores that match the filter expression. - The following fields are supported: - - - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``online_serving_config.fixed_node_count``: Supports - ``=``, ``!=``, ``<``, ``>``, ``<=``, and ``>=`` - comparisons. - - ``labels``: Supports key-value equality and key presence. - - Examples: - - - ``create_time > "2020-01-01" OR update_time > "2020-01-01"`` - Featurestores created or updated after 2020-01-01. - - ``labels.env = "prod"`` Featurestores with label "env" - set to "prod". - page_size (int): - The maximum number of Featurestores to - return. The service may return fewer than this - value. If unspecified, at most 100 Featurestores - will be returned. The maximum value is 100; any - value greater than 100 will be coerced to 100. - page_token (str): - A page token, received from a previous - [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores] - call. Provide this to retrieve the subsequent page. - - When paginating, all other parameters provided to - [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores] - must match the call that provided the page token. - order_by (str): - A comma-separated list of fields to order by, sorted in - ascending order. Use "desc" after a field name for - descending. Supported Fields: - - - ``create_time`` - - ``update_time`` - - ``online_serving_config.fixed_node_count`` - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - order_by = proto.Field( - proto.STRING, - number=5, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=6, - message=field_mask_pb2.FieldMask, - ) - - -class ListFeaturestoresResponse(proto.Message): - r"""Response message for - [FeaturestoreService.ListFeaturestores][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores]. - - Attributes: - featurestores (Sequence[google.cloud.aiplatform_v1beta1.types.Featurestore]): - The Featurestores matching the request. - next_page_token (str): - A token, which can be sent as - [ListFeaturestoresRequest.page_token][google.cloud.aiplatform.v1beta1.ListFeaturestoresRequest.page_token] - to retrieve the next page. If this field is omitted, there - are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - featurestores = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_featurestore.Featurestore, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateFeaturestoreRequest(proto.Message): - r"""Request message for - [FeaturestoreService.UpdateFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeaturestore]. - - Attributes: - featurestore (google.cloud.aiplatform_v1beta1.types.Featurestore): - Required. The Featurestore's ``name`` field is used to - identify the Featurestore to be updated. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Field mask is used to specify the fields to be overwritten - in the Featurestore resource by the update. The fields - specified in the update_mask are relative to the resource, - not the full request. A field will be overwritten if it is - in the mask. 
If the user does not provide a mask then only - the non-empty fields present in the request will be - overwritten. Set the update_mask to ``*`` to override all - fields. - - Updatable fields: - - - ``labels`` - - ``online_serving_config.fixed_node_count`` - """ - - featurestore = proto.Field( - proto.MESSAGE, - number=1, - message=gca_featurestore.Featurestore, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeleteFeaturestoreRequest(proto.Message): - r"""Request message for - [FeaturestoreService.DeleteFeaturestore][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeaturestore]. - - Attributes: - name (str): - Required. The name of the Featurestore to be deleted. - Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - force (bool): - If set to true, any EntityTypes and Features - for this Featurestore will also be deleted. - (Otherwise, the request will only work if the - Featurestore has no EntityTypes.) - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - force = proto.Field( - proto.BOOL, - number=2, - ) - - -class ImportFeatureValuesRequest(proto.Message): - r"""Request message for - [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - avro_source (google.cloud.aiplatform_v1beta1.types.AvroSource): - - This field is a member of `oneof`_ ``source``. - bigquery_source (google.cloud.aiplatform_v1beta1.types.BigQuerySource): - - This field is a member of `oneof`_ ``source``. 
- csv_source (google.cloud.aiplatform_v1beta1.types.CsvSource): - - This field is a member of `oneof`_ ``source``. - feature_time_field (str): - Source column that holds the Feature - timestamp for all Feature values in each entity. - - This field is a member of `oneof`_ ``feature_time_source``. - feature_time (google.protobuf.timestamp_pb2.Timestamp): - Single Feature timestamp for all entities - being imported. The timestamp must not have - higher than millisecond precision. - - This field is a member of `oneof`_ ``feature_time_source``. - entity_type (str): - Required. The resource name of the EntityType grouping the - Features for which values are being imported. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`` - entity_id_field (str): - Source column that holds entity IDs. If not provided, entity - IDs are extracted from the column named ``entity_id``. - feature_specs (Sequence[google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesRequest.FeatureSpec]): - Required. Specifications defining which Feature values to - import from the entity. The request fails if no - feature_specs are provided, and having multiple - feature_specs for one Feature is not allowed. - disable_online_serving (bool): - If set, data will not be imported for online - serving. This is typically used for backfilling, - where Feature generation timestamps are not in - the timestamp range needed for online serving. - worker_count (int): - Specifies the number of workers that are used - to write data to the Featurestore. Consider the - online serving capacity that you require to - achieve the desired import throughput without - interfering with online serving. The value must - be positive, and less than or equal to 100. If - not set, defaults to using 1 worker. The low - count ensures minimal impact on online serving - performance. - """ - - class FeatureSpec(proto.Message): - r"""Defines the Feature value(s) to import. 
- - Attributes: - id (str): - Required. ID of the Feature to import values - of. This Feature must exist in the target - EntityType, or the request will fail. - source_field (str): - Source column to get the Feature values from. - If not set, uses the column with the same name - as the Feature ID. - """ - - id = proto.Field( - proto.STRING, - number=1, - ) - source_field = proto.Field( - proto.STRING, - number=2, - ) - - avro_source = proto.Field( - proto.MESSAGE, - number=2, - oneof='source', - message=io.AvroSource, - ) - bigquery_source = proto.Field( - proto.MESSAGE, - number=3, - oneof='source', - message=io.BigQuerySource, - ) - csv_source = proto.Field( - proto.MESSAGE, - number=4, - oneof='source', - message=io.CsvSource, - ) - feature_time_field = proto.Field( - proto.STRING, - number=6, - oneof='feature_time_source', - ) - feature_time = proto.Field( - proto.MESSAGE, - number=7, - oneof='feature_time_source', - message=timestamp_pb2.Timestamp, - ) - entity_type = proto.Field( - proto.STRING, - number=1, - ) - entity_id_field = proto.Field( - proto.STRING, - number=5, - ) - feature_specs = proto.RepeatedField( - proto.MESSAGE, - number=8, - message=FeatureSpec, - ) - disable_online_serving = proto.Field( - proto.BOOL, - number=9, - ) - worker_count = proto.Field( - proto.INT32, - number=11, - ) - - -class ImportFeatureValuesResponse(proto.Message): - r"""Response message for - [FeaturestoreService.ImportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues]. - - Attributes: - imported_entity_count (int): - Number of entities that have been imported by - the operation. - imported_feature_value_count (int): - Number of Feature values that have been - imported by the operation. - invalid_row_count (int): - The number of rows in input source that weren't imported due - to either - - - Not having any featureValues. - - Having a null entityId. - - Having a null timestamp. - - Not being parsable (applicable for CSV sources). 
- """ - - imported_entity_count = proto.Field( - proto.INT64, - number=1, - ) - imported_feature_value_count = proto.Field( - proto.INT64, - number=2, - ) - invalid_row_count = proto.Field( - proto.INT64, - number=6, - ) - - -class BatchReadFeatureValuesRequest(proto.Message): - r"""Request message for - [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - csv_read_instances (google.cloud.aiplatform_v1beta1.types.CsvSource): - Each read instance consists of exactly one read timestamp - and one or more entity IDs identifying entities of the - corresponding EntityTypes whose Features are requested. - - Each output instance contains Feature values of requested - entities concatenated together as of the read time. - - An example read instance may be - ``foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z``. - - An example output instance may be - ``foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z, foo_entity_feature1_value, bar_entity_feature2_value``. - - Timestamp in each read instance must be millisecond-aligned. - - ``csv_read_instances`` are read instances stored in a - plain-text CSV file. The header should be: - [ENTITY_TYPE_ID1], [ENTITY_TYPE_ID2], ..., timestamp - - The columns can be in any order. - - Values in the timestamp column must use the RFC 3339 format, - e.g. ``2012-07-30T10:43:17.123Z``. - - This field is a member of `oneof`_ ``read_option``. - bigquery_read_instances (google.cloud.aiplatform_v1beta1.types.BigQuerySource): - Similar to csv_read_instances, but from BigQuery source. - - This field is a member of `oneof`_ ``read_option``. 
- featurestore (str): - Required. The resource name of the Featurestore from which - to query Feature values. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - destination (google.cloud.aiplatform_v1beta1.types.FeatureValueDestination): - Required. Specifies output location and - format. - pass_through_fields (Sequence[google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest.PassThroughField]): - When not empty, the specified fields in the - \*_read_instances source will be joined as-is in the output, - in addition to those fields from the Featurestore Entity. - - For BigQuery source, the type of the pass-through values - will be automatically inferred. For CSV source, the - pass-through values will be passed as opaque bytes. - entity_type_specs (Sequence[google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest.EntityTypeSpec]): - Required. Specifies EntityType grouping Features to read - values of and settings. Each EntityType referenced in - [BatchReadFeatureValuesRequest.entity_type_specs] must have - a column specifying entity IDs in the EntityType in - [BatchReadFeatureValuesRequest.request][] . - """ - - class PassThroughField(proto.Message): - r"""Describe pass-through fields in read_instance source. - - Attributes: - field_name (str): - Required. The name of the field in the CSV header or the - name of the column in BigQuery table. The naming restriction - is the same as - [Feature.name][google.cloud.aiplatform.v1beta1.Feature.name]. - """ - - field_name = proto.Field( - proto.STRING, - number=1, - ) - - class EntityTypeSpec(proto.Message): - r"""Selects Features of an EntityType to read values of and - specifies read settings. - - Attributes: - entity_type_id (str): - Required. ID of the EntityType to select Features. The - EntityType id is the - [entity_type_id][google.cloud.aiplatform.v1beta1.CreateEntityTypeRequest.entity_type_id] - specified during EntityType creation. 
- feature_selector (google.cloud.aiplatform_v1beta1.types.FeatureSelector): - Required. Selectors choosing which Feature - values to read from the EntityType. - settings (Sequence[google.cloud.aiplatform_v1beta1.types.DestinationFeatureSetting]): - Per-Feature settings for the batch read. - """ - - entity_type_id = proto.Field( - proto.STRING, - number=1, - ) - feature_selector = proto.Field( - proto.MESSAGE, - number=2, - message=gca_feature_selector.FeatureSelector, - ) - settings = proto.RepeatedField( - proto.MESSAGE, - number=3, - message='DestinationFeatureSetting', - ) - - csv_read_instances = proto.Field( - proto.MESSAGE, - number=3, - oneof='read_option', - message=io.CsvSource, - ) - bigquery_read_instances = proto.Field( - proto.MESSAGE, - number=5, - oneof='read_option', - message=io.BigQuerySource, - ) - featurestore = proto.Field( - proto.STRING, - number=1, - ) - destination = proto.Field( - proto.MESSAGE, - number=4, - message='FeatureValueDestination', - ) - pass_through_fields = proto.RepeatedField( - proto.MESSAGE, - number=8, - message=PassThroughField, - ) - entity_type_specs = proto.RepeatedField( - proto.MESSAGE, - number=7, - message=EntityTypeSpec, - ) - - -class ExportFeatureValuesRequest(proto.Message): - r"""Request message for - [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - snapshot_export (google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest.SnapshotExport): - Exports the latest Feature values of all - entities of the EntityType within a time range. - - This field is a member of `oneof`_ ``mode``. 
- full_export (google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest.FullExport): - Exports all historical values of all entities - of the EntityType within a time range - - This field is a member of `oneof`_ ``mode``. - entity_type (str): - Required. The resource name of the EntityType from which to - export Feature values. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - destination (google.cloud.aiplatform_v1beta1.types.FeatureValueDestination): - Required. Specifies destination location and - format. - feature_selector (google.cloud.aiplatform_v1beta1.types.FeatureSelector): - Required. Selects Features to export values - of. - settings (Sequence[google.cloud.aiplatform_v1beta1.types.DestinationFeatureSetting]): - Per-Feature export settings. - """ - - class SnapshotExport(proto.Message): - r"""Describes exporting the latest Feature values of all entities of the - EntityType between [start_time, snapshot_time]. - - Attributes: - snapshot_time (google.protobuf.timestamp_pb2.Timestamp): - Exports Feature values as of this timestamp. - If not set, retrieve values as of now. - Timestamp, if present, must not have higher than - millisecond precision. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Excludes Feature values with feature - generation timestamp before this timestamp. If - not set, retrieve oldest values kept in Feature - Store. Timestamp, if present, must not have - higher than millisecond precision. - """ - - snapshot_time = proto.Field( - proto.MESSAGE, - number=1, - message=timestamp_pb2.Timestamp, - ) - start_time = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - - class FullExport(proto.Message): - r"""Describes exporting all historical Feature values of all entities of - the EntityType between [start_time, end_time]. 
- - Attributes: - start_time (google.protobuf.timestamp_pb2.Timestamp): - Excludes Feature values with feature - generation timestamp before this timestamp. If - not set, retrieve oldest values kept in Feature - Store. Timestamp, if present, must not have - higher than millisecond precision. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Exports Feature values as of this timestamp. - If not set, retrieve values as of now. - Timestamp, if present, must not have higher than - millisecond precision. - """ - - start_time = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=1, - message=timestamp_pb2.Timestamp, - ) - - snapshot_export = proto.Field( - proto.MESSAGE, - number=3, - oneof='mode', - message=SnapshotExport, - ) - full_export = proto.Field( - proto.MESSAGE, - number=7, - oneof='mode', - message=FullExport, - ) - entity_type = proto.Field( - proto.STRING, - number=1, - ) - destination = proto.Field( - proto.MESSAGE, - number=4, - message='FeatureValueDestination', - ) - feature_selector = proto.Field( - proto.MESSAGE, - number=5, - message=gca_feature_selector.FeatureSelector, - ) - settings = proto.RepeatedField( - proto.MESSAGE, - number=6, - message='DestinationFeatureSetting', - ) - - -class DestinationFeatureSetting(proto.Message): - r""" - - Attributes: - feature_id (str): - Required. The ID of the Feature to apply the - setting to. - destination_field (str): - Specify the field name in the export - destination. If not specified, Feature ID is - used. - """ - - feature_id = proto.Field( - proto.STRING, - number=1, - ) - destination_field = proto.Field( - proto.STRING, - number=2, - ) - - -class FeatureValueDestination(proto.Message): - r"""A destination location for Feature values and format. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. 
- Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - bigquery_destination (google.cloud.aiplatform_v1beta1.types.BigQueryDestination): - Output in BigQuery format. - [BigQueryDestination.output_uri][google.cloud.aiplatform.v1beta1.BigQueryDestination.output_uri] - in - [FeatureValueDestination.bigquery_destination][google.cloud.aiplatform.v1beta1.FeatureValueDestination.bigquery_destination] - must refer to a table. - - This field is a member of `oneof`_ ``destination``. - tfrecord_destination (google.cloud.aiplatform_v1beta1.types.TFRecordDestination): - Output in TFRecord format. - - Below are the mapping from Feature value type in - Featurestore to Feature value type in TFRecord: - - :: - - Value type in Featurestore | Value type in TFRecord - DOUBLE, DOUBLE_ARRAY | FLOAT_LIST - INT64, INT64_ARRAY | INT64_LIST - STRING, STRING_ARRAY, BYTES | BYTES_LIST - true -> byte_string("true"), false -> byte_string("false") - BOOL, BOOL_ARRAY (true, false) | BYTES_LIST - - This field is a member of `oneof`_ ``destination``. - csv_destination (google.cloud.aiplatform_v1beta1.types.CsvDestination): - Output in CSV format. Array Feature value - types are not allowed in CSV format. - - This field is a member of `oneof`_ ``destination``. - """ - - bigquery_destination = proto.Field( - proto.MESSAGE, - number=1, - oneof='destination', - message=io.BigQueryDestination, - ) - tfrecord_destination = proto.Field( - proto.MESSAGE, - number=2, - oneof='destination', - message=io.TFRecordDestination, - ) - csv_destination = proto.Field( - proto.MESSAGE, - number=3, - oneof='destination', - message=io.CsvDestination, - ) - - -class ExportFeatureValuesResponse(proto.Message): - r"""Response message for - [FeaturestoreService.ExportFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues]. 
- - """ - - -class BatchReadFeatureValuesResponse(proto.Message): - r"""Response message for - [FeaturestoreService.BatchReadFeatureValues][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues]. - - """ - - -class CreateEntityTypeRequest(proto.Message): - r"""Request message for - [FeaturestoreService.CreateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateEntityType]. - - Attributes: - parent (str): - Required. The resource name of the Featurestore to create - EntityTypes. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - entity_type (google.cloud.aiplatform_v1beta1.types.EntityType): - The EntityType to create. - entity_type_id (str): - Required. The ID to use for the EntityType, which will - become the final component of the EntityType's resource - name. - - This value may be up to 60 characters, and valid characters - are ``[a-z0-9_]``. The first character cannot be a number. - - The value must be unique within a featurestore. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - entity_type = proto.Field( - proto.MESSAGE, - number=2, - message=gca_entity_type.EntityType, - ) - entity_type_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetEntityTypeRequest(proto.Message): - r"""Request message for - [FeaturestoreService.GetEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetEntityType]. - - Attributes: - name (str): - Required. The name of the EntityType resource. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListEntityTypesRequest(proto.Message): - r"""Request message for - [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. - - Attributes: - parent (str): - Required. The resource name of the Featurestore to list - EntityTypes. 
Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}`` - filter (str): - Lists the EntityTypes that match the filter expression. The - following filters are supported: - - - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``>=``, and ``<=`` comparisons. Values must be in RFC - 3339 format. - - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``>=``, and ``<=`` comparisons. Values must be in RFC - 3339 format. - - ``labels``: Supports key-value equality as well as key - presence. - - Examples: - - - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"`` - --> EntityTypes created or updated after - 2020-01-31T15:30:00.000000Z. - - ``labels.active = yes AND labels.env = prod`` --> - EntityTypes having both (active: yes) and (env: prod) - labels. - - ``labels.env: *`` --> Any EntityType which has a label - with 'env' as the key. - page_size (int): - The maximum number of EntityTypes to return. - The service may return fewer than this value. If - unspecified, at most 1000 EntityTypes will be - returned. The maximum value is 1000; any value - greater than 1000 will be coerced to 1000. - page_token (str): - A page token, received from a previous - [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes] - call. Provide this to retrieve the subsequent page. - - When paginating, all other parameters provided to - [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes] - must match the call that provided the page token. - order_by (str): - A comma-separated list of fields to order by, sorted in - ascending order. Use "desc" after a field name for - descending. - - Supported fields: - - - ``entity_type_id`` - - ``create_time`` - - ``update_time`` - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - order_by = proto.Field( - proto.STRING, - number=5, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=6, - message=field_mask_pb2.FieldMask, - ) - - -class ListEntityTypesResponse(proto.Message): - r"""Response message for - [FeaturestoreService.ListEntityTypes][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes]. - - Attributes: - entity_types (Sequence[google.cloud.aiplatform_v1beta1.types.EntityType]): - The EntityTypes matching the request. - next_page_token (str): - A token, which can be sent as - [ListEntityTypesRequest.page_token][google.cloud.aiplatform.v1beta1.ListEntityTypesRequest.page_token] - to retrieve the next page. If this field is omitted, there - are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - entity_types = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_entity_type.EntityType, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateEntityTypeRequest(proto.Message): - r"""Request message for - [FeaturestoreService.UpdateEntityType][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateEntityType]. - - Attributes: - entity_type (google.cloud.aiplatform_v1beta1.types.EntityType): - Required. The EntityType's ``name`` field is used to - identify the EntityType to be updated. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Field mask is used to specify the fields to be overwritten - in the EntityType resource by the update. The fields - specified in the update_mask are relative to the resource, - not the full request. A field will be overwritten if it is - in the mask. 
If the user does not provide a mask then only - the non-empty fields present in the request will be - overwritten. Set the update_mask to ``*`` to override all - fields. - - Updatable fields: - - - ``description`` - - ``labels`` - - ``monitoring_config.snapshot_analysis.disabled`` - - ``monitoring_config.snapshot_analysis.monitoring_interval`` - """ - - entity_type = proto.Field( - proto.MESSAGE, - number=1, - message=gca_entity_type.EntityType, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeleteEntityTypeRequest(proto.Message): - r"""Request message for [FeaturestoreService.DeleteEntityTypes][]. - - Attributes: - name (str): - Required. The name of the EntityType to be deleted. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - force (bool): - If set to true, any Features for this - EntityType will also be deleted. (Otherwise, the - request will only work if the EntityType has no - Features.) - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - force = proto.Field( - proto.BOOL, - number=2, - ) - - -class CreateFeatureRequest(proto.Message): - r"""Request message for - [FeaturestoreService.CreateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeature]. - - Attributes: - parent (str): - Required. The resource name of the EntityType to create a - Feature. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - feature (google.cloud.aiplatform_v1beta1.types.Feature): - Required. The Feature to create. - feature_id (str): - Required. The ID to use for the Feature, which will become - the final component of the Feature's resource name. - - This value may be up to 60 characters, and valid characters - are ``[a-z0-9_]``. The first character cannot be a number. - - The value must be unique within an EntityType. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - feature = proto.Field( - proto.MESSAGE, - number=2, - message=gca_feature.Feature, - ) - feature_id = proto.Field( - proto.STRING, - number=3, - ) - - -class BatchCreateFeaturesRequest(proto.Message): - r"""Request message for - [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. - - Attributes: - parent (str): - Required. The resource name of the EntityType to create the - batch of Features under. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - requests (Sequence[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest]): - Required. The request message specifying the Features to - create. All Features must be created under the same parent - EntityType. The ``parent`` field in each child request - message can be omitted. If ``parent`` is set in a child - request, then the value must match the ``parent`` value in - this request message. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - requests = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='CreateFeatureRequest', - ) - - -class BatchCreateFeaturesResponse(proto.Message): - r"""Response message for - [FeaturestoreService.BatchCreateFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures]. - - Attributes: - features (Sequence[google.cloud.aiplatform_v1beta1.types.Feature]): - The Features created. - """ - - features = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_feature.Feature, - ) - - -class GetFeatureRequest(proto.Message): - r"""Request message for - [FeaturestoreService.GetFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeature]. - - Attributes: - name (str): - Required. The name of the Feature resource. 
Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListFeaturesRequest(proto.Message): - r"""Request message for - [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]. - - Attributes: - parent (str): - Required. The resource name of the Location to list - Features. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}`` - filter (str): - Lists the Features that match the filter expression. The - following filters are supported: - - - ``value_type``: Supports = and != comparisons. - - ``create_time``: Supports =, !=, <, >, >=, and <= - comparisons. Values must be in RFC 3339 format. - - ``update_time``: Supports =, !=, <, >, >=, and <= - comparisons. Values must be in RFC 3339 format. - - ``labels``: Supports key-value equality as well as key - presence. - - Examples: - - - ``value_type = DOUBLE`` --> Features whose type is - DOUBLE. - - ``create_time > \"2020-01-31T15:30:00.000000Z\" OR update_time > \"2020-01-31T15:30:00.000000Z\"`` - --> EntityTypes created or updated after - 2020-01-31T15:30:00.000000Z. - - ``labels.active = yes AND labels.env = prod`` --> - Features having both (active: yes) and (env: prod) - labels. - - ``labels.env: *`` --> Any Feature which has a label with - 'env' as the key. - page_size (int): - The maximum number of Features to return. The - service may return fewer than this value. If - unspecified, at most 1000 Features will be - returned. The maximum value is 1000; any value - greater than 1000 will be coerced to 1000. - page_token (str): - A page token, received from a previous - [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures] - call. Provide this to retrieve the subsequent page. 
- - When paginating, all other parameters provided to - [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures] - must match the call that provided the page token. - order_by (str): - A comma-separated list of fields to order by, sorted in - ascending order. Use "desc" after a field name for - descending. Supported fields: - - - ``feature_id`` - - ``value_type`` - - ``create_time`` - - ``update_time`` - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - latest_stats_count (int): - If set, return the most recent - [ListFeaturesRequest.latest_stats_count][google.cloud.aiplatform.v1beta1.ListFeaturesRequest.latest_stats_count] - of stats for each Feature in response. Valid value is [0, - 10]. If number of stats exists < - [ListFeaturesRequest.latest_stats_count][google.cloud.aiplatform.v1beta1.ListFeaturesRequest.latest_stats_count], - return all existing stats. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - order_by = proto.Field( - proto.STRING, - number=5, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=6, - message=field_mask_pb2.FieldMask, - ) - latest_stats_count = proto.Field( - proto.INT32, - number=7, - ) - - -class ListFeaturesResponse(proto.Message): - r"""Response message for - [FeaturestoreService.ListFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures]. - - Attributes: - features (Sequence[google.cloud.aiplatform_v1beta1.types.Feature]): - The Features matching the request. - next_page_token (str): - A token, which can be sent as - [ListFeaturesRequest.page_token][google.cloud.aiplatform.v1beta1.ListFeaturesRequest.page_token] - to retrieve the next page. If this field is omitted, there - are no subsequent pages. 
- """ - - @property - def raw_page(self): - return self - - features = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_feature.Feature, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class SearchFeaturesRequest(proto.Message): - r"""Request message for - [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. - - Attributes: - location (str): - Required. The resource name of the Location to search - Features. Format: - ``projects/{project}/locations/{location}`` - query (str): - Query string that is a conjunction of field-restricted - queries and/or field-restricted filters. Field-restricted - queries and filters can be combined using ``AND`` to form a - conjunction. - - A field query is in the form FIELD:QUERY. This implicitly - checks if QUERY exists as a substring within Feature's - FIELD. The QUERY and the FIELD are converted to a sequence - of words (i.e. tokens) for comparison. This is done by: - - - Removing leading/trailing whitespace and tokenizing the - search value. Characters that are not one of alphanumeric - ``[a-zA-Z0-9]``, underscore ``_``, or asterisk ``*`` are - treated as delimiters for tokens. ``*`` is treated as a - wildcard that matches characters within a token. - - Ignoring case. - - Prepending an asterisk to the first and appending an - asterisk to the last token in QUERY. - - A QUERY must be either a singular token or a phrase. A - phrase is one or multiple words enclosed in double quotation - marks ("). With phrases, the order of the words is - important. Words in the phrase must be matching in order and - consecutively. - - Supported FIELDs for field-restricted queries: - - - ``feature_id`` - - ``description`` - - ``entity_type_id`` - - Examples: - - - ``feature_id: foo`` --> Matches a Feature with ID - containing the substring ``foo`` (eg. ``foo``, - ``foofeature``, ``barfoo``). 
- - ``feature_id: foo*feature`` --> Matches a Feature with ID - containing the substring ``foo*feature`` (eg. - ``foobarfeature``). - - ``feature_id: foo AND description: bar`` --> Matches a - Feature with ID containing the substring ``foo`` and - description containing the substring ``bar``. - - Besides field queries, the following exact-match filters are - supported. The exact-match filters do not support wildcards. - Unlike field-restricted queries, exact-match filters are - case-sensitive. - - - ``feature_id``: Supports = comparisons. - - ``description``: Supports = comparisons. Multi-token - filters should be enclosed in quotes. - - ``entity_type_id``: Supports = comparisons. - - ``value_type``: Supports = and != comparisons. - - ``labels``: Supports key-value equality as well as key - presence. - - ``featurestore_id``: Supports = comparisons. - - Examples: - - - ``description = "foo bar"`` --> Any Feature with - description exactly equal to ``foo bar`` - - ``value_type = DOUBLE`` --> Features whose type is - DOUBLE. - - ``labels.active = yes AND labels.env = prod`` --> - Features having both (active: yes) and (env: prod) - labels. - - ``labels.env: *`` --> Any Feature which has a label with - ``env`` as the key. - page_size (int): - The maximum number of Features to return. The - service may return fewer than this value. If - unspecified, at most 100 Features will be - returned. The maximum value is 100; any value - greater than 100 will be coerced to 100. - page_token (str): - A page token, received from a previous - [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures] - call. Provide this to retrieve the subsequent page. - - When paginating, all other parameters provided to - [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures], - except ``page_size``, must match the call that provided the - page token. 
- """ - - location = proto.Field( - proto.STRING, - number=1, - ) - query = proto.Field( - proto.STRING, - number=3, - ) - page_size = proto.Field( - proto.INT32, - number=4, - ) - page_token = proto.Field( - proto.STRING, - number=5, - ) - - -class SearchFeaturesResponse(proto.Message): - r"""Response message for - [FeaturestoreService.SearchFeatures][google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures]. - - Attributes: - features (Sequence[google.cloud.aiplatform_v1beta1.types.Feature]): - The Features matching the request. - - Fields returned: - - - ``name`` - - ``description`` - - ``labels`` - - ``create_time`` - - ``update_time`` - next_page_token (str): - A token, which can be sent as - [SearchFeaturesRequest.page_token][google.cloud.aiplatform.v1beta1.SearchFeaturesRequest.page_token] - to retrieve the next page. If this field is omitted, there - are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - features = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_feature.Feature, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateFeatureRequest(proto.Message): - r"""Request message for - [FeaturestoreService.UpdateFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeature]. - - Attributes: - feature (google.cloud.aiplatform_v1beta1.types.Feature): - Required. The Feature's ``name`` field is used to identify - the Feature to be updated. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Field mask is used to specify the fields to be overwritten - in the Features resource by the update. The fields specified - in the update_mask are relative to the resource, not the - full request. A field will be overwritten if it is in the - mask. 
If the user does not provide a mask then only the - non-empty fields present in the request will be overwritten. - Set the update_mask to ``*`` to override all fields. - - Updatable fields: - - - ``description`` - - ``labels`` - - ``monitoring_config.snapshot_analysis.disabled`` - - ``monitoring_config.snapshot_analysis.monitoring_interval`` - """ - - feature = proto.Field( - proto.MESSAGE, - number=1, - message=gca_feature.Feature, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeleteFeatureRequest(proto.Message): - r"""Request message for - [FeaturestoreService.DeleteFeature][google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeature]. - - Attributes: - name (str): - Required. The name of the Features to be deleted. Format: - ``projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateFeaturestoreOperationMetadata(proto.Message): - r"""Details of operations that perform create Featurestore. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for Featurestore. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class UpdateFeaturestoreOperationMetadata(proto.Message): - r"""Details of operations that perform update Featurestore. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for Featurestore. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class ImportFeatureValuesOperationMetadata(proto.Message): - r"""Details of operations that perform import Feature values. 
- - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for Featurestore import - Feature values. - imported_entity_count (int): - Number of entities that have been imported by - the operation. - imported_feature_value_count (int): - Number of Feature values that have been - imported by the operation. - invalid_row_count (int): - The number of rows in input source that weren't imported due - to either - - - Not having any featureValues. - - Having a null entityId. - - Having a null timestamp. - - Not being parsable (applicable for CSV sources). - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - imported_entity_count = proto.Field( - proto.INT64, - number=2, - ) - imported_feature_value_count = proto.Field( - proto.INT64, - number=3, - ) - invalid_row_count = proto.Field( - proto.INT64, - number=6, - ) - - -class ExportFeatureValuesOperationMetadata(proto.Message): - r"""Details of operations that exports Features values. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for Featurestore export - Feature values. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class BatchReadFeatureValuesOperationMetadata(proto.Message): - r"""Details of operations that batch reads Feature values. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for Featurestore batch - read Features values. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class CreateEntityTypeOperationMetadata(proto.Message): - r"""Details of operations that perform create EntityType. 
- - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for EntityType. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class CreateFeatureOperationMetadata(proto.Message): - r"""Details of operations that perform create Feature. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for Feature. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class BatchCreateFeaturesOperationMetadata(proto.Message): - r"""Details of operations that perform batch create Features. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for Feature. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py deleted file mode 100644 index 138d0c58e4..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/hyperparameter_tuning_job.py +++ /dev/null @@ -1,182 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import custom_job -from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1beta1.types import job_state -from google.cloud.aiplatform_v1beta1.types import study -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'HyperparameterTuningJob', - }, -) - - -class HyperparameterTuningJob(proto.Message): - r"""Represents a HyperparameterTuningJob. A - HyperparameterTuningJob has a Study specification and multiple - CustomJobs with identical CustomJob specification. - - Attributes: - name (str): - Output only. Resource name of the - HyperparameterTuningJob. - display_name (str): - Required. The display name of the - HyperparameterTuningJob. The name can be up to - 128 characters long and can be consist of any - UTF-8 characters. - study_spec (google.cloud.aiplatform_v1beta1.types.StudySpec): - Required. Study configuration of the - HyperparameterTuningJob. - max_trial_count (int): - Required. The desired total number of Trials. - parallel_trial_count (int): - Required. The desired number of Trials to run - in parallel. - max_failed_trial_count (int): - The number of failed Trials that need to be - seen before failing the HyperparameterTuningJob. - If set to 0, Vertex AI decides how many Trials - must fail before the whole job fails. - trial_job_spec (google.cloud.aiplatform_v1beta1.types.CustomJobSpec): - Required. The spec of a trial job. The same - spec applies to the CustomJobs created in all - the trials. - trials (Sequence[google.cloud.aiplatform_v1beta1.types.Trial]): - Output only. Trials of the - HyperparameterTuningJob. 
- state (google.cloud.aiplatform_v1beta1.types.JobState): - Output only. The detailed state of the job. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the - HyperparameterTuningJob was created. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the HyperparameterTuningJob for the - first time entered the ``JOB_STATE_RUNNING`` state. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the HyperparameterTuningJob entered - any of the following states: ``JOB_STATE_SUCCEEDED``, - ``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the - HyperparameterTuningJob was most recently - updated. - error (google.rpc.status_pb2.Status): - Output only. Only populated when job's state is - JOB_STATE_FAILED or JOB_STATE_CANCELLED. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob.LabelsEntry]): - The labels with user-defined metadata to - organize HyperparameterTuningJobs. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): - Customer-managed encryption key options for a - HyperparameterTuningJob. If this is set, then - all resources created by the - HyperparameterTuningJob will be encrypted with - the provided encryption key. 
- """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - study_spec = proto.Field( - proto.MESSAGE, - number=4, - message=study.StudySpec, - ) - max_trial_count = proto.Field( - proto.INT32, - number=5, - ) - parallel_trial_count = proto.Field( - proto.INT32, - number=6, - ) - max_failed_trial_count = proto.Field( - proto.INT32, - number=7, - ) - trial_job_spec = proto.Field( - proto.MESSAGE, - number=8, - message=custom_job.CustomJobSpec, - ) - trials = proto.RepeatedField( - proto.MESSAGE, - number=9, - message=study.Trial, - ) - state = proto.Field( - proto.ENUM, - number=10, - enum=job_state.JobState, - ) - create_time = proto.Field( - proto.MESSAGE, - number=11, - message=timestamp_pb2.Timestamp, - ) - start_time = proto.Field( - proto.MESSAGE, - number=12, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=13, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=14, - message=timestamp_pb2.Timestamp, - ) - error = proto.Field( - proto.MESSAGE, - number=15, - message=status_pb2.Status, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=16, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=17, - message=gca_encryption_spec.EncryptionSpec, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index.py deleted file mode 100644 index fef443aa5e..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index.py +++ /dev/null @@ -1,142 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import deployed_index_ref -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'Index', - }, -) - - -class Index(proto.Message): - r"""A representation of a collection of database items organized - in a way that allows for approximate nearest neighbor (a.k.a - ANN) algorithms search. - - Attributes: - name (str): - Output only. The resource name of the Index. - display_name (str): - Required. The display name of the Index. - The name can be up to 128 characters long and - can be consist of any UTF-8 characters. - description (str): - The description of the Index. - metadata_schema_uri (str): - Immutable. Points to a YAML file stored on Google Cloud - Storage describing additional information about the Index, - that is specific to it. Unset if the Index does not have any - additional information. The schema is defined as an OpenAPI - 3.0.2 `Schema - Object `__. - Note: The URI given on output will be immutable and probably - different, including the URI scheme, than the one given on - input. The output URI will point to a location where the - user only has a read access. - metadata (google.protobuf.struct_pb2.Value): - An additional information about the Index; the schema of the - metadata can be found in - [metadata_schema][google.cloud.aiplatform.v1beta1.Index.metadata_schema_uri]. 
- deployed_indexes (Sequence[google.cloud.aiplatform_v1beta1.types.DeployedIndexRef]): - Output only. The pointers to DeployedIndexes - created from this Index. An Index can be only - deleted if all its DeployedIndexes had been - undeployed first. - etag (str): - Used to perform consistent read-modify-write - updates. If not set, a blind "overwrite" update - happens. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Index.LabelsEntry]): - The labels with user-defined metadata to - organize your Indexes. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Index was - created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Index was most recently - updated. This also includes any update to the contents of - the Index. Note that Operations working on this Index may - have their - [Operations.metadata.generic_metadata.update_time] - [google.cloud.aiplatform.v1beta1.GenericOperationMetadata.update_time] - a little after the value of this timestamp, yet that does - not mean their results are not already reflected in the - Index. Result of any successfully completed Operation on the - Index is reflected in it. 
- """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=3, - ) - metadata_schema_uri = proto.Field( - proto.STRING, - number=4, - ) - metadata = proto.Field( - proto.MESSAGE, - number=6, - message=struct_pb2.Value, - ) - deployed_indexes = proto.RepeatedField( - proto.MESSAGE, - number=7, - message=deployed_index_ref.DeployedIndexRef, - ) - etag = proto.Field( - proto.STRING, - number=8, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=9, - ) - create_time = proto.Field( - proto.MESSAGE, - number=10, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=11, - message=timestamp_pb2.Timestamp, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint.py deleted file mode 100644 index 95e807b628..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint.py +++ /dev/null @@ -1,371 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import machine_resources -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'IndexEndpoint', - 'DeployedIndex', - 'DeployedIndexAuthConfig', - 'IndexPrivateEndpoints', - }, -) - - -class IndexEndpoint(proto.Message): - r"""Indexes are deployed into it. An IndexEndpoint can have - multiple DeployedIndexes. - - Attributes: - name (str): - Output only. The resource name of the - IndexEndpoint. - display_name (str): - Required. The display name of the - IndexEndpoint. The name can be up to 128 - characters long and can consist of any UTF-8 - characters. - description (str): - The description of the IndexEndpoint. - deployed_indexes (Sequence[google.cloud.aiplatform_v1beta1.types.DeployedIndex]): - Output only. The indexes deployed in this - endpoint. - etag (str): - Used to perform consistent read-modify-write - updates. If not set, a blind "overwrite" update - happens. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.IndexEndpoint.LabelsEntry]): - The labels with user-defined metadata to - organize your IndexEndpoints. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - IndexEndpoint was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - IndexEndpoint was last updated. This timestamp - is not updated when the endpoint's - DeployedIndexes are updated, e.g. due to updates - of the original Indexes they are the deployments - of. - network (str): - Optional. 
The full name of the Google Compute Engine - `network `__ - to which the IndexEndpoint should be peered. - - Private services access must already be configured for the - network. If left unspecified, the Endpoint is not peered - with any network. - - Only one of the fields, - [network][google.cloud.aiplatform.v1beta1.IndexEndpoint.network] - or - [enable_private_service_connect][google.cloud.aiplatform.v1beta1.IndexEndpoint.enable_private_service_connect], - can be set. - - `Format `__: - projects/{project}/global/networks/{network}. Where - {project} is a project number, as in '12345', and {network} - is network name. - enable_private_service_connect (bool): - Optional. If true, expose the IndexEndpoint via private - service connect. - - Only one of the fields, - [network][google.cloud.aiplatform.v1beta1.IndexEndpoint.network] - or - [enable_private_service_connect][google.cloud.aiplatform.v1beta1.IndexEndpoint.enable_private_service_connect], - can be set. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=3, - ) - deployed_indexes = proto.RepeatedField( - proto.MESSAGE, - number=4, - message='DeployedIndex', - ) - etag = proto.Field( - proto.STRING, - number=5, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=6, - ) - create_time = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=8, - message=timestamp_pb2.Timestamp, - ) - network = proto.Field( - proto.STRING, - number=9, - ) - enable_private_service_connect = proto.Field( - proto.BOOL, - number=10, - ) - - -class DeployedIndex(proto.Message): - r"""A deployment of an Index. IndexEndpoints contain one or more - DeployedIndexes. - - Attributes: - id (str): - Required. The user specified ID of the - DeployedIndex. 
The ID can be up to 128 - characters long and must start with a letter and - only contain letters, numbers, and underscores. - The ID must be unique within the project it is - created in. - index (str): - Required. The name of the Index this is the - deployment of. We may refer to this Index as the - DeployedIndex's "original" Index. - display_name (str): - The display name of the DeployedIndex. If not provided upon - creation, the Index's display_name is used. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when the DeployedIndex - was created. - private_endpoints (google.cloud.aiplatform_v1beta1.types.IndexPrivateEndpoints): - Output only. Provides paths for users to send requests - directly to the deployed index services running on Cloud via - private services access. This field is populated if - [network][google.cloud.aiplatform.v1beta1.IndexEndpoint.network] - is configured. - index_sync_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. The DeployedIndex may depend on various data on - its original Index. Additionally when certain changes to the - original Index are being done (e.g. when what the Index - contains is being changed) the DeployedIndex may be - asynchronously updated in the background to reflect this - changes. If this timestamp's value is at least the - [Index.update_time][google.cloud.aiplatform.v1beta1.Index.update_time] - of the original Index, it means that this DeployedIndex and - the original Index are in sync. If this timestamp is older, - then to see which updates this DeployedIndex already - contains (and which not), one must - [list][Operations.ListOperations] [Operations][Operation] - [working][Operation.name] on the original Index. Only the - successfully completed Operations with - [Operations.metadata.generic_metadata.update_time] - [google.cloud.aiplatform.v1beta1.GenericOperationMetadata.update_time] - equal or before this sync time are contained in this - DeployedIndex. 
- automatic_resources (google.cloud.aiplatform_v1beta1.types.AutomaticResources): - Optional. A description of resources that the DeployedIndex - uses, which to large degree are decided by Vertex AI, and - optionally allows only a modest additional configuration. If - min_replica_count is not set, the default value is 2 (we - don't provide SLA when min_replica_count=1). If - max_replica_count is not set, the default value is - min_replica_count. The max allowed replica count is 1000. - enable_access_logging (bool): - Optional. If true, private endpoint's access - logs are sent to StackDriver Logging. - These logs are like standard server access logs, - containing information like timestamp and - latency for each MatchRequest. - Note that Stackdriver logs may incur a cost, - especially if the deployed index receives a high - queries per second rate (QPS). Estimate your - costs before enabling this option. - deployed_index_auth_config (google.cloud.aiplatform_v1beta1.types.DeployedIndexAuthConfig): - Optional. If set, the authentication is - enabled for the private endpoint. - reserved_ip_ranges (Sequence[str]): - Optional. A list of reserved ip ranges under - the VPC network that can be used for this - DeployedIndex. - If set, we will deploy the index within the - provided ip ranges. Otherwise, the index might - be deployed to any ip ranges under the provided - VPC network. - - The value sohuld be the name of the address - (https://cloud.google.com/compute/docs/reference/rest/v1/addresses) - Example: 'vertex-ai-ip-range'. - deployment_group (str): - Optional. The deployment group can be no longer than 64 - characters (eg: 'test', 'prod'). If not set, we will use the - 'default' deployment group. - - Creating ``deployment_groups`` with ``reserved_ip_ranges`` - is a recommended practice when the peered network has - multiple peering ranges. This creates your deployments from - predictable IP spaces for easier traffic administration. 
- Also, one deployment_group (except 'default') can only be - used with the same reserved_ip_ranges which means if the - deployment_group has been used with reserved_ip_ranges: [a, - b, c], using it with [a, b] or [d, e] is disallowed. - - Note: we only support up to 5 deployment groups(not - including 'default'). - """ - - id = proto.Field( - proto.STRING, - number=1, - ) - index = proto.Field( - proto.STRING, - number=2, - ) - display_name = proto.Field( - proto.STRING, - number=3, - ) - create_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - private_endpoints = proto.Field( - proto.MESSAGE, - number=5, - message='IndexPrivateEndpoints', - ) - index_sync_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - automatic_resources = proto.Field( - proto.MESSAGE, - number=7, - message=machine_resources.AutomaticResources, - ) - enable_access_logging = proto.Field( - proto.BOOL, - number=8, - ) - deployed_index_auth_config = proto.Field( - proto.MESSAGE, - number=9, - message='DeployedIndexAuthConfig', - ) - reserved_ip_ranges = proto.RepeatedField( - proto.STRING, - number=10, - ) - deployment_group = proto.Field( - proto.STRING, - number=11, - ) - - -class DeployedIndexAuthConfig(proto.Message): - r"""Used to set up the auth on the DeployedIndex's private - endpoint. - - Attributes: - auth_provider (google.cloud.aiplatform_v1beta1.types.DeployedIndexAuthConfig.AuthProvider): - Defines the authentication provider that the - DeployedIndex uses. - """ - - class AuthProvider(proto.Message): - r"""Configuration for an authentication provider, including support for - `JSON Web Token - (JWT) `__. - - Attributes: - audiences (Sequence[str]): - The list of JWT - `audiences `__. - that are allowed to access. A JWT containing any of these - audiences will be accepted. - allowed_issuers (Sequence[str]): - A list of allowed JWT issuers. 
Each entry must be a valid - Google service account, in the following format: - - ``service-account-name@project-id.iam.gserviceaccount.com`` - """ - - audiences = proto.RepeatedField( - proto.STRING, - number=1, - ) - allowed_issuers = proto.RepeatedField( - proto.STRING, - number=2, - ) - - auth_provider = proto.Field( - proto.MESSAGE, - number=1, - message=AuthProvider, - ) - - -class IndexPrivateEndpoints(proto.Message): - r"""IndexPrivateEndpoints proto is used to provide paths for users to - send requests via private endpoints (e.g. private service access, - private service connect). To send request via private service - access, use match_grpc_address. To send request via private service - connect, use service_attachment. - - Attributes: - match_grpc_address (str): - Output only. The ip address used to send - match gRPC requests. - service_attachment (str): - Output only. The name of the service - attachment resource. Populated if private - service connect is enabled. - """ - - match_grpc_address = proto.Field( - proto.STRING, - number=1, - ) - service_attachment = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py deleted file mode 100644 index 61a110521e..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_endpoint_service.py +++ /dev/null @@ -1,419 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint -from google.cloud.aiplatform_v1beta1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'CreateIndexEndpointRequest', - 'CreateIndexEndpointOperationMetadata', - 'GetIndexEndpointRequest', - 'ListIndexEndpointsRequest', - 'ListIndexEndpointsResponse', - 'UpdateIndexEndpointRequest', - 'DeleteIndexEndpointRequest', - 'DeployIndexRequest', - 'DeployIndexResponse', - 'DeployIndexOperationMetadata', - 'UndeployIndexRequest', - 'UndeployIndexResponse', - 'UndeployIndexOperationMetadata', - 'MutateDeployedIndexRequest', - 'MutateDeployedIndexResponse', - 'MutateDeployedIndexOperationMetadata', - }, -) - - -class CreateIndexEndpointRequest(proto.Message): - r"""Request message for - [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - IndexEndpoint in. Format: - ``projects/{project}/locations/{location}`` - index_endpoint (google.cloud.aiplatform_v1beta1.types.IndexEndpoint): - Required. The IndexEndpoint to create. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - index_endpoint = proto.Field( - proto.MESSAGE, - number=2, - message=gca_index_endpoint.IndexEndpoint, - ) - - -class CreateIndexEndpointOperationMetadata(proto.Message): - r"""Runtime operation information for - [IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The operation generic information. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class GetIndexEndpointRequest(proto.Message): - r"""Request message for - [IndexEndpointService.GetIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.GetIndexEndpoint] - - Attributes: - name (str): - Required. The name of the IndexEndpoint resource. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListIndexEndpointsRequest(proto.Message): - r"""Request message for - [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]. - - Attributes: - parent (str): - Required. The resource name of the Location from which to - list the IndexEndpoints. Format: - ``projects/{project}/locations/{location}`` - filter (str): - Optional. An expression for filtering the results of the - request. For field names both snake_case and camelCase are - supported. - - - ``index_endpoint`` supports = and !=. ``index_endpoint`` - represents the IndexEndpoint ID, ie. the last segment of - the IndexEndpoint's - [resourcename][google.cloud.aiplatform.v1beta1.IndexEndpoint.name]. 
- - ``display_name`` supports =, != and regex() (uses - `re2 `__ - syntax) - - ``labels`` supports general map functions that is: - ``labels.key=value`` - key:value equality - ``labels.key:* or labels:key - key existence A key including a space must be quoted.``\ labels."a - key"`. - - Some examples: - - - ``index_endpoint="1"`` - - ``display_name="myDisplayName"`` - - \`regex(display_name, "^A") -> The display name starts - with an A. - - ``labels.myKey="myValue"`` - page_size (int): - Optional. The standard list page size. - page_token (str): - Optional. The standard list page token. Typically obtained - via - [ListIndexEndpointsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListIndexEndpointsResponse.next_page_token] - of the previous - [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Optional. Mask specifying which fields to - read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListIndexEndpointsResponse(proto.Message): - r"""Response message for - [IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]. - - Attributes: - index_endpoints (Sequence[google.cloud.aiplatform_v1beta1.types.IndexEndpoint]): - List of IndexEndpoints in the requested page. - next_page_token (str): - A token to retrieve next page of results. Pass to - [ListIndexEndpointsRequest.page_token][google.cloud.aiplatform.v1beta1.ListIndexEndpointsRequest.page_token] - to obtain that page. 
- """ - - @property - def raw_page(self): - return self - - index_endpoints = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_index_endpoint.IndexEndpoint, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateIndexEndpointRequest(proto.Message): - r"""Request message for - [IndexEndpointService.UpdateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.UpdateIndexEndpoint]. - - Attributes: - index_endpoint (google.cloud.aiplatform_v1beta1.types.IndexEndpoint): - Required. The IndexEndpoint which replaces - the resource on the server. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. See - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - """ - - index_endpoint = proto.Field( - proto.MESSAGE, - number=1, - message=gca_index_endpoint.IndexEndpoint, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeleteIndexEndpointRequest(proto.Message): - r"""Request message for - [IndexEndpointService.DeleteIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeleteIndexEndpoint]. - - Attributes: - name (str): - Required. The name of the IndexEndpoint resource to be - deleted. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class DeployIndexRequest(proto.Message): - r"""Request message for - [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. - - Attributes: - index_endpoint (str): - Required. The name of the IndexEndpoint resource into which - to deploy an Index. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex): - Required. The DeployedIndex to be created - within the IndexEndpoint. 
- """ - - index_endpoint = proto.Field( - proto.STRING, - number=1, - ) - deployed_index = proto.Field( - proto.MESSAGE, - number=2, - message=gca_index_endpoint.DeployedIndex, - ) - - -class DeployIndexResponse(proto.Message): - r"""Response message for - [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. - - Attributes: - deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex): - The DeployedIndex that had been deployed in - the IndexEndpoint. - """ - - deployed_index = proto.Field( - proto.MESSAGE, - number=1, - message=gca_index_endpoint.DeployedIndex, - ) - - -class DeployIndexOperationMetadata(proto.Message): - r"""Runtime operation information for - [IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The operation generic information. - deployed_index_id (str): - The unique index id specified by user - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - deployed_index_id = proto.Field( - proto.STRING, - number=2, - ) - - -class UndeployIndexRequest(proto.Message): - r"""Request message for - [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. - - Attributes: - index_endpoint (str): - Required. The name of the IndexEndpoint resource from which - to undeploy an Index. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - deployed_index_id (str): - Required. The ID of the DeployedIndex to be - undeployed from the IndexEndpoint. 
- """ - - index_endpoint = proto.Field( - proto.STRING, - number=1, - ) - deployed_index_id = proto.Field( - proto.STRING, - number=2, - ) - - -class UndeployIndexResponse(proto.Message): - r"""Response message for - [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. - - """ - - -class UndeployIndexOperationMetadata(proto.Message): - r"""Runtime operation information for - [IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The operation generic information. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class MutateDeployedIndexRequest(proto.Message): - r"""Request message for - [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. - - Attributes: - index_endpoint (str): - Required. The name of the IndexEndpoint resource into which - to deploy an Index. Format: - ``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}`` - deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex): - Required. The DeployedIndex to be updated within the - IndexEndpoint. Currently, the updatable fields are - [DeployedIndex][automatic_resources] and - [DeployedIndex][dedicated_resources] - """ - - index_endpoint = proto.Field( - proto.STRING, - number=1, - ) - deployed_index = proto.Field( - proto.MESSAGE, - number=2, - message=gca_index_endpoint.DeployedIndex, - ) - - -class MutateDeployedIndexResponse(proto.Message): - r"""Response message for - [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. 
- - Attributes: - deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex): - The DeployedIndex that had been updated in - the IndexEndpoint. - """ - - deployed_index = proto.Field( - proto.MESSAGE, - number=1, - message=gca_index_endpoint.DeployedIndex, - ) - - -class MutateDeployedIndexOperationMetadata(proto.Message): - r"""Runtime operation information for - [IndexEndpointService.MutateDeployedIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The operation generic information. - deployed_index_id (str): - The unique index id specified by user - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - deployed_index_id = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_service.py deleted file mode 100644 index bf1ea3d2d3..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/index_service.py +++ /dev/null @@ -1,362 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import index as gca_index -from google.cloud.aiplatform_v1beta1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'CreateIndexRequest', - 'CreateIndexOperationMetadata', - 'GetIndexRequest', - 'ListIndexesRequest', - 'ListIndexesResponse', - 'UpdateIndexRequest', - 'UpdateIndexOperationMetadata', - 'DeleteIndexRequest', - 'NearestNeighborSearchOperationMetadata', - }, -) - - -class CreateIndexRequest(proto.Message): - r"""Request message for - [IndexService.CreateIndex][google.cloud.aiplatform.v1beta1.IndexService.CreateIndex]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - Index in. Format: - ``projects/{project}/locations/{location}`` - index (google.cloud.aiplatform_v1beta1.types.Index): - Required. The Index to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - index = proto.Field( - proto.MESSAGE, - number=2, - message=gca_index.Index, - ) - - -class CreateIndexOperationMetadata(proto.Message): - r"""Runtime operation information for - [IndexService.CreateIndex][google.cloud.aiplatform.v1beta1.IndexService.CreateIndex]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The operation generic information. - nearest_neighbor_search_operation_metadata (google.cloud.aiplatform_v1beta1.types.NearestNeighborSearchOperationMetadata): - The operation metadata with regard to - Matching Engine Index operation. 
- """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - nearest_neighbor_search_operation_metadata = proto.Field( - proto.MESSAGE, - number=2, - message='NearestNeighborSearchOperationMetadata', - ) - - -class GetIndexRequest(proto.Message): - r"""Request message for - [IndexService.GetIndex][google.cloud.aiplatform.v1beta1.IndexService.GetIndex] - - Attributes: - name (str): - Required. The name of the Index resource. Format: - ``projects/{project}/locations/{location}/indexes/{index}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListIndexesRequest(proto.Message): - r"""Request message for - [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. - - Attributes: - parent (str): - Required. The resource name of the Location from which to - list the Indexes. Format: - ``projects/{project}/locations/{location}`` - filter (str): - The standard list filter. - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListIndexesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListIndexesResponse.next_page_token] - of the previous - [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListIndexesResponse(proto.Message): - r"""Response message for - [IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes]. 
- - Attributes: - indexes (Sequence[google.cloud.aiplatform_v1beta1.types.Index]): - List of indexes in the requested page. - next_page_token (str): - A token to retrieve next page of results. Pass to - [ListIndexesRequest.page_token][google.cloud.aiplatform.v1beta1.ListIndexesRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - indexes = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_index.Index, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateIndexRequest(proto.Message): - r"""Request message for - [IndexService.UpdateIndex][google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex]. - - Attributes: - index (google.cloud.aiplatform_v1beta1.types.Index): - Required. The Index which updates the - resource on the server. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - The update mask applies to the resource. For the - ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. - """ - - index = proto.Field( - proto.MESSAGE, - number=1, - message=gca_index.Index, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class UpdateIndexOperationMetadata(proto.Message): - r"""Runtime operation information for - [IndexService.UpdateIndex][google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The operation generic information. - nearest_neighbor_search_operation_metadata (google.cloud.aiplatform_v1beta1.types.NearestNeighborSearchOperationMetadata): - The operation metadata with regard to - Matching Engine Index operation. 
- """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - nearest_neighbor_search_operation_metadata = proto.Field( - proto.MESSAGE, - number=2, - message='NearestNeighborSearchOperationMetadata', - ) - - -class DeleteIndexRequest(proto.Message): - r"""Request message for - [IndexService.DeleteIndex][google.cloud.aiplatform.v1beta1.IndexService.DeleteIndex]. - - Attributes: - name (str): - Required. The name of the Index resource to be deleted. - Format: - ``projects/{project}/locations/{location}/indexes/{index}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class NearestNeighborSearchOperationMetadata(proto.Message): - r"""Runtime operation metadata with regard to Matching Engine - Index. - - Attributes: - content_validation_stats (Sequence[google.cloud.aiplatform_v1beta1.types.NearestNeighborSearchOperationMetadata.ContentValidationStats]): - The validation stats of the content (per file) to be - inserted or updated on the Matching Engine Index resource. - Populated if contentsDeltaUri is provided as part of - [Index.metadata][google.cloud.aiplatform.v1beta1.Index.metadata]. - Please note that, currently for those files that are broken - or has unsupported file format, we will not have the stats - for those files. - data_bytes_count (int): - The ingested data size in bytes. - """ - - class RecordError(proto.Message): - r""" - - Attributes: - error_type (google.cloud.aiplatform_v1beta1.types.NearestNeighborSearchOperationMetadata.RecordError.RecordErrorType): - The error type of this record. - error_message (str): - A human-readable message that is shown to the user to help - them fix the error. Note that this message may change from - time to time, your code should check against error_type as - the source of truth. - source_gcs_uri (str): - Cloud Storage URI pointing to the original - file in user's bucket. - embedding_id (str): - Empty if the embedding id is failed to parse. 
- raw_record (str): - The original content of this record. - """ - class RecordErrorType(proto.Enum): - r"""""" - ERROR_TYPE_UNSPECIFIED = 0 - EMPTY_LINE = 1 - INVALID_JSON_SYNTAX = 2 - INVALID_CSV_SYNTAX = 3 - INVALID_AVRO_SYNTAX = 4 - INVALID_EMBEDDING_ID = 5 - EMBEDDING_SIZE_MISMATCH = 6 - NAMESPACE_MISSING = 7 - - error_type = proto.Field( - proto.ENUM, - number=1, - enum='NearestNeighborSearchOperationMetadata.RecordError.RecordErrorType', - ) - error_message = proto.Field( - proto.STRING, - number=2, - ) - source_gcs_uri = proto.Field( - proto.STRING, - number=3, - ) - embedding_id = proto.Field( - proto.STRING, - number=4, - ) - raw_record = proto.Field( - proto.STRING, - number=5, - ) - - class ContentValidationStats(proto.Message): - r""" - - Attributes: - source_gcs_uri (str): - Cloud Storage URI pointing to the original - file in user's bucket. - valid_record_count (int): - Number of records in this file that were - successfully processed. - invalid_record_count (int): - Number of records in this file we skipped due - to validate errors. - partial_errors (Sequence[google.cloud.aiplatform_v1beta1.types.NearestNeighborSearchOperationMetadata.RecordError]): - The detail information of the partial - failures encountered for those invalid records - that couldn't be parsed. Up to 50 partial errors - will be reported. 
- """ - - source_gcs_uri = proto.Field( - proto.STRING, - number=1, - ) - valid_record_count = proto.Field( - proto.INT64, - number=2, - ) - invalid_record_count = proto.Field( - proto.INT64, - number=3, - ) - partial_errors = proto.RepeatedField( - proto.MESSAGE, - number=4, - message='NearestNeighborSearchOperationMetadata.RecordError', - ) - - content_validation_stats = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=ContentValidationStats, - ) - data_bytes_count = proto.Field( - proto.INT64, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/io.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/io.py deleted file mode 100644 index a1e80754a7..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/io.py +++ /dev/null @@ -1,198 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'AvroSource', - 'CsvSource', - 'GcsSource', - 'GcsDestination', - 'BigQuerySource', - 'BigQueryDestination', - 'CsvDestination', - 'TFRecordDestination', - 'ContainerRegistryDestination', - }, -) - - -class AvroSource(proto.Message): - r"""The storage details for Avro input content. 
- - Attributes: - gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): - Required. Google Cloud Storage location. - """ - - gcs_source = proto.Field( - proto.MESSAGE, - number=1, - message='GcsSource', - ) - - -class CsvSource(proto.Message): - r"""The storage details for CSV input content. - - Attributes: - gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): - Required. Google Cloud Storage location. - """ - - gcs_source = proto.Field( - proto.MESSAGE, - number=1, - message='GcsSource', - ) - - -class GcsSource(proto.Message): - r"""The Google Cloud Storage location for the input content. - - Attributes: - uris (Sequence[str]): - Required. Google Cloud Storage URI(-s) to the - input file(s). May contain wildcards. For more - information on wildcards, see - https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames. - """ - - uris = proto.RepeatedField( - proto.STRING, - number=1, - ) - - -class GcsDestination(proto.Message): - r"""The Google Cloud Storage location where the output is to be - written to. - - Attributes: - output_uri_prefix (str): - Required. Google Cloud Storage URI to output - directory. If the uri doesn't end with '/', a - '/' will be automatically appended. The - directory is created if it doesn't exist. - """ - - output_uri_prefix = proto.Field( - proto.STRING, - number=1, - ) - - -class BigQuerySource(proto.Message): - r"""The BigQuery location for the input content. - - Attributes: - input_uri (str): - Required. BigQuery URI to a table, up to 2000 characters - long. Accepted forms: - - - BigQuery path. For example: - ``bq://projectId.bqDatasetId.bqTableId``. - """ - - input_uri = proto.Field( - proto.STRING, - number=1, - ) - - -class BigQueryDestination(proto.Message): - r"""The BigQuery location for the output content. - - Attributes: - output_uri (str): - Required. BigQuery URI to a project or table, up to 2000 - characters long. - - When only the project is specified, the Dataset and Table is - created. 
When the full table reference is specified, the - Dataset must exist and table must not exist. - - Accepted forms: - - - BigQuery path. For example: ``bq://projectId`` or - ``bq://projectId.bqDatasetId`` or - ``bq://projectId.bqDatasetId.bqTableId``. - """ - - output_uri = proto.Field( - proto.STRING, - number=1, - ) - - -class CsvDestination(proto.Message): - r"""The storage details for CSV output content. - - Attributes: - gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination): - Required. Google Cloud Storage location. - """ - - gcs_destination = proto.Field( - proto.MESSAGE, - number=1, - message='GcsDestination', - ) - - -class TFRecordDestination(proto.Message): - r"""The storage details for TFRecord output content. - - Attributes: - gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination): - Required. Google Cloud Storage location. - """ - - gcs_destination = proto.Field( - proto.MESSAGE, - number=1, - message='GcsDestination', - ) - - -class ContainerRegistryDestination(proto.Message): - r"""The Container Registry location for the container image. - - Attributes: - output_uri (str): - Required. Container Registry URI of a container image. Only - Google Container Registry and Artifact Registry are - supported now. Accepted forms: - - - Google Container Registry path. For example: - ``gcr.io/projectId/imageName:tag``. - - - Artifact Registry path. For example: - ``us-central1-docker.pkg.dev/projectId/repoName/imageName:tag``. - - If a tag is not specified, "latest" will be used as the - default tag. 
- """ - - output_uri = proto.Field( - proto.STRING, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_service.py deleted file mode 100644 index 063f3afb0f..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_service.py +++ /dev/null @@ -1,1093 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job -from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job -from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job -from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job -from google.cloud.aiplatform_v1beta1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'CreateCustomJobRequest', - 'GetCustomJobRequest', - 'ListCustomJobsRequest', - 'ListCustomJobsResponse', - 'DeleteCustomJobRequest', - 'CancelCustomJobRequest', - 'CreateDataLabelingJobRequest', - 'GetDataLabelingJobRequest', - 'ListDataLabelingJobsRequest', - 'ListDataLabelingJobsResponse', - 'DeleteDataLabelingJobRequest', - 'CancelDataLabelingJobRequest', - 'CreateHyperparameterTuningJobRequest', - 'GetHyperparameterTuningJobRequest', - 'ListHyperparameterTuningJobsRequest', - 'ListHyperparameterTuningJobsResponse', - 'DeleteHyperparameterTuningJobRequest', - 'CancelHyperparameterTuningJobRequest', - 'CreateBatchPredictionJobRequest', - 'GetBatchPredictionJobRequest', - 'ListBatchPredictionJobsRequest', - 'ListBatchPredictionJobsResponse', - 'DeleteBatchPredictionJobRequest', - 'CancelBatchPredictionJobRequest', - 'CreateModelDeploymentMonitoringJobRequest', - 'SearchModelDeploymentMonitoringStatsAnomaliesRequest', - 'SearchModelDeploymentMonitoringStatsAnomaliesResponse', - 'GetModelDeploymentMonitoringJobRequest', - 'ListModelDeploymentMonitoringJobsRequest', - 'ListModelDeploymentMonitoringJobsResponse', - 'UpdateModelDeploymentMonitoringJobRequest', - 
'DeleteModelDeploymentMonitoringJobRequest', - 'PauseModelDeploymentMonitoringJobRequest', - 'ResumeModelDeploymentMonitoringJobRequest', - 'UpdateModelDeploymentMonitoringJobOperationMetadata', - }, -) - - -class CreateCustomJobRequest(proto.Message): - r"""Request message for - [JobService.CreateCustomJob][google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - CustomJob in. Format: - ``projects/{project}/locations/{location}`` - custom_job (google.cloud.aiplatform_v1beta1.types.CustomJob): - Required. The CustomJob to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - custom_job = proto.Field( - proto.MESSAGE, - number=2, - message=gca_custom_job.CustomJob, - ) - - -class GetCustomJobRequest(proto.Message): - r"""Request message for - [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob]. - - Attributes: - name (str): - Required. The name of the CustomJob resource. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListCustomJobsRequest(proto.Message): - r"""Request message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs]. - - Attributes: - parent (str): - Required. The resource name of the Location to list the - CustomJobs from. Format: - ``projects/{project}/locations/{location}`` - filter (str): - The standard list filter. - - Supported fields: - - - ``display_name`` supports = and !=. - - - ``state`` supports = and !=. - - Some examples of using the filter are: - - - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` - - - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` - - - ``NOT display_name="my_job"`` - - - ``state="JOB_STATE_FAILED"`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. 
Typically obtained via - [ListCustomJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListCustomJobsResponse.next_page_token] - of the previous - [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListCustomJobsResponse(proto.Message): - r"""Response message for - [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs] - - Attributes: - custom_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.CustomJob]): - List of CustomJobs in the requested page. - next_page_token (str): - A token to retrieve the next page of results. Pass to - [ListCustomJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListCustomJobsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - custom_jobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_custom_job.CustomJob, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteCustomJobRequest(proto.Message): - r"""Request message for - [JobService.DeleteCustomJob][google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob]. - - Attributes: - name (str): - Required. The name of the CustomJob resource to be deleted. - Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CancelCustomJobRequest(proto.Message): - r"""Request message for - [JobService.CancelCustomJob][google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob]. 
- - Attributes: - name (str): - Required. The name of the CustomJob to cancel. Format: - ``projects/{project}/locations/{location}/customJobs/{custom_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateDataLabelingJobRequest(proto.Message): - r"""Request message for - [JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CreateDataLabelingJob]. - - Attributes: - parent (str): - Required. The parent of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}`` - data_labeling_job (google.cloud.aiplatform_v1beta1.types.DataLabelingJob): - Required. The DataLabelingJob to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - data_labeling_job = proto.Field( - proto.MESSAGE, - number=2, - message=gca_data_labeling_job.DataLabelingJob, - ) - - -class GetDataLabelingJobRequest(proto.Message): - r"""Request message for - [JobService.GetDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.GetDataLabelingJob]. - - Attributes: - name (str): - Required. The name of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListDataLabelingJobsRequest(proto.Message): - r"""Request message for - [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. - - Attributes: - parent (str): - Required. The parent of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}`` - filter (str): - The standard list filter. - - Supported fields: - - - ``display_name`` supports = and !=. - - - ``state`` supports = and !=. - - Some examples of using the filter are: - - - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` - - - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` - - - ``NOT display_name="my_job"`` - - - ``state="JOB_STATE_FAILED"`` - page_size (int): - The standard list page size. 
- page_token (str): - The standard list page token. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. FieldMask represents a - set of symbolic field paths. For example, the mask can be - ``paths: "name"``. The "name" here is a field in - DataLabelingJob. If this field is not set, all fields of the - DataLabelingJob are returned. - order_by (str): - A comma-separated list of fields to order by, sorted in - ascending order by default. Use ``desc`` after a field name - for descending. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - order_by = proto.Field( - proto.STRING, - number=6, - ) - - -class ListDataLabelingJobsResponse(proto.Message): - r"""Response message for - [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. - - Attributes: - data_labeling_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.DataLabelingJob]): - A list of DataLabelingJobs that matches the - specified filter in the request. - next_page_token (str): - The standard List next-page token. - """ - - @property - def raw_page(self): - return self - - data_labeling_jobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_data_labeling_job.DataLabelingJob, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteDataLabelingJobRequest(proto.Message): - r"""Request message for - [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob]. - - Attributes: - name (str): - Required. The name of the DataLabelingJob to be deleted. 
- Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CancelDataLabelingJobRequest(proto.Message): - r"""Request message for - [JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.CancelDataLabelingJob]. - - Attributes: - name (str): - Required. The name of the DataLabelingJob. Format: - ``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateHyperparameterTuningJobRequest(proto.Message): - r"""Request message for - [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - HyperparameterTuningJob in. Format: - ``projects/{project}/locations/{location}`` - hyperparameter_tuning_job (google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob): - Required. The HyperparameterTuningJob to - create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - hyperparameter_tuning_job = proto.Field( - proto.MESSAGE, - number=2, - message=gca_hyperparameter_tuning_job.HyperparameterTuningJob, - ) - - -class GetHyperparameterTuningJobRequest(proto.Message): - r"""Request message for - [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob]. - - Attributes: - name (str): - Required. The name of the HyperparameterTuningJob resource. - Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListHyperparameterTuningJobsRequest(proto.Message): - r"""Request message for - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs]. 
- - Attributes: - parent (str): - Required. The resource name of the Location to list the - HyperparameterTuningJobs from. Format: - ``projects/{project}/locations/{location}`` - filter (str): - The standard list filter. - - Supported fields: - - - ``display_name`` supports = and !=. - - - ``state`` supports = and !=. - - Some examples of using the filter are: - - - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` - - - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` - - - ``NOT display_name="my_job"`` - - - ``state="JOB_STATE_FAILED"`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListHyperparameterTuningJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListHyperparameterTuningJobsResponse.next_page_token] - of the previous - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListHyperparameterTuningJobsResponse(proto.Message): - r"""Response message for - [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs] - - Attributes: - hyperparameter_tuning_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob]): - List of HyperparameterTuningJobs in the requested page. - [HyperparameterTuningJob.trials][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.trials] - of the jobs will be not be returned. - next_page_token (str): - A token to retrieve the next page of results. 
Pass to - [ListHyperparameterTuningJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListHyperparameterTuningJobsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - hyperparameter_tuning_jobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_hyperparameter_tuning_job.HyperparameterTuningJob, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteHyperparameterTuningJobRequest(proto.Message): - r"""Request message for - [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob]. - - Attributes: - name (str): - Required. The name of the HyperparameterTuningJob resource - to be deleted. Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CancelHyperparameterTuningJobRequest(proto.Message): - r"""Request message for - [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob]. - - Attributes: - name (str): - Required. The name of the HyperparameterTuningJob to cancel. - Format: - ``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateBatchPredictionJobRequest(proto.Message): - r"""Request message for - [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - BatchPredictionJob in. Format: - ``projects/{project}/locations/{location}`` - batch_prediction_job (google.cloud.aiplatform_v1beta1.types.BatchPredictionJob): - Required. The BatchPredictionJob to create. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - batch_prediction_job = proto.Field( - proto.MESSAGE, - number=2, - message=gca_batch_prediction_job.BatchPredictionJob, - ) - - -class GetBatchPredictionJobRequest(proto.Message): - r"""Request message for - [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob]. - - Attributes: - name (str): - Required. The name of the BatchPredictionJob resource. - Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListBatchPredictionJobsRequest(proto.Message): - r"""Request message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs]. - - Attributes: - parent (str): - Required. The resource name of the Location to list the - BatchPredictionJobs from. Format: - ``projects/{project}/locations/{location}`` - filter (str): - The standard list filter. - - Supported fields: - - - ``display_name`` supports = and !=. - - - ``state`` supports = and !=. - - - ``model_display_name`` supports = and != - - Some examples of using the filter are: - - - ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"`` - - - ``state="JOB_STATE_RUNNING" OR display_name="my_job"`` - - - ``NOT display_name="my_job"`` - - - ``state="JOB_STATE_FAILED"`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListBatchPredictionJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListBatchPredictionJobsResponse.next_page_token] - of the previous - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListBatchPredictionJobsResponse(proto.Message): - r"""Response message for - [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs] - - Attributes: - batch_prediction_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.BatchPredictionJob]): - List of BatchPredictionJobs in the requested - page. - next_page_token (str): - A token to retrieve the next page of results. Pass to - [ListBatchPredictionJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListBatchPredictionJobsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - batch_prediction_jobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_batch_prediction_job.BatchPredictionJob, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteBatchPredictionJobRequest(proto.Message): - r"""Request message for - [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob]. - - Attributes: - name (str): - Required. The name of the BatchPredictionJob resource to be - deleted. Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CancelBatchPredictionJobRequest(proto.Message): - r"""Request message for - [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob]. - - Attributes: - name (str): - Required. The name of the BatchPredictionJob to cancel. 
- Format: - ``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateModelDeploymentMonitoringJobRequest(proto.Message): - r"""Request message for - [JobService.CreateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.CreateModelDeploymentMonitoringJob]. - - Attributes: - parent (str): - Required. The parent of the ModelDeploymentMonitoringJob. - Format: ``projects/{project}/locations/{location}`` - model_deployment_monitoring_job (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob): - Required. The ModelDeploymentMonitoringJob to - create - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - model_deployment_monitoring_job = proto.Field( - proto.MESSAGE, - number=2, - message=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, - ) - - -class SearchModelDeploymentMonitoringStatsAnomaliesRequest(proto.Message): - r"""Request message for - [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. - - Attributes: - model_deployment_monitoring_job (str): - Required. ModelDeploymentMonitoring Job resource name. - Format: - \`projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job} - deployed_model_id (str): - Required. The DeployedModel ID of the - [ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. - feature_display_name (str): - The feature display name. If specified, only return the - stats belonging to this feature. Format: - [ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.feature_display_name][google.cloud.aiplatform.v1beta1.ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.feature_display_name], - example: "user_destination". 
- objectives (Sequence[google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest.StatsAnomaliesObjective]): - Required. Objectives of the stats to - retrieve. - page_size (int): - The standard list page size. - page_token (str): - A page token received from a previous - [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies] - call. - start_time (google.protobuf.timestamp_pb2.Timestamp): - The earliest timestamp of stats being - generated. If not set, indicates fetching stats - till the earliest possible one. - end_time (google.protobuf.timestamp_pb2.Timestamp): - The latest timestamp of stats being - generated. If not set, indicates feching stats - till the latest possible one. - """ - - class StatsAnomaliesObjective(proto.Message): - r"""Stats requested for specific objective. - - Attributes: - type_ (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringObjectiveType): - - top_feature_count (int): - If set, all attribution scores between - [SearchModelDeploymentMonitoringStatsAnomaliesRequest.start_time][google.cloud.aiplatform.v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest.start_time] - and - [SearchModelDeploymentMonitoringStatsAnomaliesRequest.end_time][google.cloud.aiplatform.v1beta1.SearchModelDeploymentMonitoringStatsAnomaliesRequest.end_time] - are fetched, and page token doesn't take affect in this - case. Only used to retrieve attribution score for the top - Features which has the highest attribution score in the - latest monitoring run. 
- """ - - type_ = proto.Field( - proto.ENUM, - number=1, - enum=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringObjectiveType, - ) - top_feature_count = proto.Field( - proto.INT32, - number=4, - ) - - model_deployment_monitoring_job = proto.Field( - proto.STRING, - number=1, - ) - deployed_model_id = proto.Field( - proto.STRING, - number=2, - ) - feature_display_name = proto.Field( - proto.STRING, - number=3, - ) - objectives = proto.RepeatedField( - proto.MESSAGE, - number=4, - message=StatsAnomaliesObjective, - ) - page_size = proto.Field( - proto.INT32, - number=5, - ) - page_token = proto.Field( - proto.STRING, - number=6, - ) - start_time = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=8, - message=timestamp_pb2.Timestamp, - ) - - -class SearchModelDeploymentMonitoringStatsAnomaliesResponse(proto.Message): - r"""Response message for - [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies]. - - Attributes: - monitoring_stats (Sequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringStatsAnomalies]): - Stats retrieved for requested objectives. There are at most - 1000 - [ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.prediction_stats][google.cloud.aiplatform.v1beta1.ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.prediction_stats] - in the response. - next_page_token (str): - The page token that can be used by the next - [JobService.SearchModelDeploymentMonitoringStatsAnomalies][google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies] - call. 
- """ - - @property - def raw_page(self): - return self - - monitoring_stats = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class GetModelDeploymentMonitoringJobRequest(proto.Message): - r"""Request message for - [JobService.GetModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.GetModelDeploymentMonitoringJob]. - - Attributes: - name (str): - Required. The resource name of the - ModelDeploymentMonitoringJob. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListModelDeploymentMonitoringJobsRequest(proto.Message): - r"""Request message for - [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. - - Attributes: - parent (str): - Required. The parent of the ModelDeploymentMonitoringJob. - Format: ``projects/{project}/locations/{location}`` - filter (str): - The standard list filter. - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListModelDeploymentMonitoringJobsResponse(proto.Message): - r"""Response message for - [JobService.ListModelDeploymentMonitoringJobs][google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs]. 
- - Attributes: - model_deployment_monitoring_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob]): - A list of ModelDeploymentMonitoringJobs that - matches the specified filter in the request. - next_page_token (str): - The standard List next-page token. - """ - - @property - def raw_page(self): - return self - - model_deployment_monitoring_jobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateModelDeploymentMonitoringJobRequest(proto.Message): - r"""Request message for - [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob]. - - Attributes: - model_deployment_monitoring_job (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob): - Required. The model monitoring configuration - which replaces the resource on the server. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask is used to specify the fields to - be overwritten in the ModelDeploymentMonitoringJob resource - by the update. The fields specified in the update_mask are - relative to the resource, not the full request. A field will - be overwritten if it is in the mask. If the user does not - provide a mask then only the non-empty fields present in the - request will be overwritten. Set the update_mask to ``*`` to - override all fields. For the objective config, the user can - either provide the update mask for - model_deployment_monitoring_objective_configs or any - combination of its nested fields, such as: - model_deployment_monitoring_objective_configs.objective_config.training_dataset. 
- - Updatable fields: - - - ``display_name`` - - ``model_deployment_monitoring_schedule_config`` - - ``model_monitoring_alert_config`` - - ``logging_sampling_strategy`` - - ``labels`` - - ``log_ttl`` - - ``enable_monitoring_pipeline_logs`` . and - - ``model_deployment_monitoring_objective_configs`` . or - - ``model_deployment_monitoring_objective_configs.objective_config.training_dataset`` - - ``model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config`` - - ``model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config`` - """ - - model_deployment_monitoring_job = proto.Field( - proto.MESSAGE, - number=1, - message=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeleteModelDeploymentMonitoringJobRequest(proto.Message): - r"""Request message for - [JobService.DeleteModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.DeleteModelDeploymentMonitoringJob]. - - Attributes: - name (str): - Required. The resource name of the model monitoring job to - delete. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class PauseModelDeploymentMonitoringJobRequest(proto.Message): - r"""Request message for - [JobService.PauseModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.PauseModelDeploymentMonitoringJob]. - - Attributes: - name (str): - Required. The resource name of the - ModelDeploymentMonitoringJob to pause. 
Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ResumeModelDeploymentMonitoringJobRequest(proto.Message): - r"""Request message for - [JobService.ResumeModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.ResumeModelDeploymentMonitoringJob]. - - Attributes: - name (str): - Required. The resource name of the - ModelDeploymentMonitoringJob to resume. Format: - ``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class UpdateModelDeploymentMonitoringJobOperationMetadata(proto.Message): - r"""Runtime operation information for - [JobService.UpdateModelDeploymentMonitoringJob][google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The operation generic information. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_state.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_state.py deleted file mode 100644 index 677ba3b002..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/job_state.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'JobState', - }, -) - - -class JobState(proto.Enum): - r"""Describes the state of a job.""" - JOB_STATE_UNSPECIFIED = 0 - JOB_STATE_QUEUED = 1 - JOB_STATE_PENDING = 2 - JOB_STATE_RUNNING = 3 - JOB_STATE_SUCCEEDED = 4 - JOB_STATE_FAILED = 5 - JOB_STATE_CANCELLING = 6 - JOB_STATE_CANCELLED = 7 - JOB_STATE_PAUSED = 8 - JOB_STATE_EXPIRED = 9 - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/lineage_subgraph.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/lineage_subgraph.py deleted file mode 100644 index 597674dbab..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/lineage_subgraph.py +++ /dev/null @@ -1,62 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import artifact -from google.cloud.aiplatform_v1beta1.types import event -from google.cloud.aiplatform_v1beta1.types import execution - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'LineageSubgraph', - }, -) - - -class LineageSubgraph(proto.Message): - r"""A subgraph of the overall lineage graph. Event edges connect - Artifact and Execution nodes. - - Attributes: - artifacts (Sequence[google.cloud.aiplatform_v1beta1.types.Artifact]): - The Artifact nodes in the subgraph. - executions (Sequence[google.cloud.aiplatform_v1beta1.types.Execution]): - The Execution nodes in the subgraph. - events (Sequence[google.cloud.aiplatform_v1beta1.types.Event]): - The Event edges between Artifacts and - Executions in the subgraph. - """ - - artifacts = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=artifact.Artifact, - ) - executions = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=execution.Execution, - ) - events = proto.RepeatedField( - proto.MESSAGE, - number=3, - message=event.Event, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/machine_resources.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/machine_resources.py deleted file mode 100644 index fc79c5fe34..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/machine_resources.py +++ /dev/null @@ -1,310 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import accelerator_type as gca_accelerator_type - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'MachineSpec', - 'DedicatedResources', - 'AutomaticResources', - 'BatchDedicatedResources', - 'ResourcesConsumed', - 'DiskSpec', - 'AutoscalingMetricSpec', - }, -) - - -class MachineSpec(proto.Message): - r"""Specification of a single machine. - - Attributes: - machine_type (str): - Immutable. The type of the machine. - - See the `list of machine types supported for - prediction `__ - - See the `list of machine types supported for custom - training `__. - - For - [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] - this field is optional, and the default value is - ``n1-standard-2``. For - [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob] - or as part of - [WorkerPoolSpec][google.cloud.aiplatform.v1beta1.WorkerPoolSpec] - this field is required. - accelerator_type (google.cloud.aiplatform_v1beta1.types.AcceleratorType): - Immutable. The type of accelerator(s) that may be attached - to the machine as per - [accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count]. - accelerator_count (int): - The number of accelerators to attach to the - machine. 
- """ - - machine_type = proto.Field( - proto.STRING, - number=1, - ) - accelerator_type = proto.Field( - proto.ENUM, - number=2, - enum=gca_accelerator_type.AcceleratorType, - ) - accelerator_count = proto.Field( - proto.INT32, - number=3, - ) - - -class DedicatedResources(proto.Message): - r"""A description of resources that are dedicated to a - DeployedModel, and that need a higher degree of manual - configuration. - - Attributes: - machine_spec (google.cloud.aiplatform_v1beta1.types.MachineSpec): - Required. Immutable. The specification of a - single machine used by the prediction. - min_replica_count (int): - Required. Immutable. The minimum number of - machine replicas this DeployedModel will be - always deployed on. This value must be greater - than or equal to 1. - If traffic against the DeployedModel increases, - it may dynamically be deployed onto more - replicas, and as traffic decreases, some of - these extra replicas may be freed. - max_replica_count (int): - Immutable. The maximum number of replicas this DeployedModel - may be deployed on when the traffic against it increases. If - the requested value is too large, the deployment will error, - but if deployment succeeds then the ability to scale the - model to that many replicas is guaranteed (barring service - outages). If traffic against the DeployedModel increases - beyond what its replicas at maximum may handle, a portion of - the traffic will be dropped. If this value is not provided, - will use - [min_replica_count][google.cloud.aiplatform.v1beta1.DedicatedResources.min_replica_count] - as the default value. - autoscaling_metric_specs (Sequence[google.cloud.aiplatform_v1beta1.types.AutoscalingMetricSpec]): - Immutable. The metric specifications that overrides a - resource utilization metric (CPU utilization, accelerator's - duty cycle, and so on) target value (default to 60 if not - set). At most one entry is allowed per metric. 
- - If - [machine_spec.accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count] - is above 0, the autoscaling will be based on both CPU - utilization and accelerator's duty cycle metrics and scale - up when either metrics exceeds its target value while scale - down if both metrics are under their target value. The - default target value is 60 for both metrics. - - If - [machine_spec.accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count] - is 0, the autoscaling will be based on CPU utilization - metric only with default target value 60 if not explicitly - set. - - For example, in the case of Online Prediction, if you want - to override target CPU utilization to 80, you should set - [autoscaling_metric_specs.metric_name][google.cloud.aiplatform.v1beta1.AutoscalingMetricSpec.metric_name] - to - ``aiplatform.googleapis.com/prediction/online/cpu/utilization`` - and - [autoscaling_metric_specs.target][google.cloud.aiplatform.v1beta1.AutoscalingMetricSpec.target] - to ``80``. - """ - - machine_spec = proto.Field( - proto.MESSAGE, - number=1, - message='MachineSpec', - ) - min_replica_count = proto.Field( - proto.INT32, - number=2, - ) - max_replica_count = proto.Field( - proto.INT32, - number=3, - ) - autoscaling_metric_specs = proto.RepeatedField( - proto.MESSAGE, - number=4, - message='AutoscalingMetricSpec', - ) - - -class AutomaticResources(proto.Message): - r"""A description of resources that to large degree are decided - by Vertex AI, and require only a modest additional - configuration. Each Model supporting these resources documents - its specific guidelines. - - Attributes: - min_replica_count (int): - Immutable. The minimum number of replicas this DeployedModel - will be always deployed on. 
If traffic against it increases, - it may dynamically be deployed onto more replicas up to - [max_replica_count][google.cloud.aiplatform.v1beta1.AutomaticResources.max_replica_count], - and as traffic decreases, some of these extra replicas may - be freed. If the requested value is too large, the - deployment will error. - max_replica_count (int): - Immutable. The maximum number of replicas - this DeployedModel may be deployed on when the - traffic against it increases. If the requested - value is too large, the deployment will error, - but if deployment succeeds then the ability to - scale the model to that many replicas is - guaranteed (barring service outages). If traffic - against the DeployedModel increases beyond what - its replicas at maximum may handle, a portion of - the traffic will be dropped. If this value is - not provided, a no upper bound for scaling under - heavy traffic will be assume, though Vertex AI - may be unable to scale beyond certain replica - number. - """ - - min_replica_count = proto.Field( - proto.INT32, - number=1, - ) - max_replica_count = proto.Field( - proto.INT32, - number=2, - ) - - -class BatchDedicatedResources(proto.Message): - r"""A description of resources that are used for performing batch - operations, are dedicated to a Model, and need manual - configuration. - - Attributes: - machine_spec (google.cloud.aiplatform_v1beta1.types.MachineSpec): - Required. Immutable. The specification of a - single machine. - starting_replica_count (int): - Immutable. The number of machine replicas used at the start - of the batch operation. If not set, Vertex AI decides - starting number, not greater than - [max_replica_count][google.cloud.aiplatform.v1beta1.BatchDedicatedResources.max_replica_count] - max_replica_count (int): - Immutable. The maximum number of machine - replicas the batch operation may be scaled to. - The default value is 10. 
- """ - - machine_spec = proto.Field( - proto.MESSAGE, - number=1, - message='MachineSpec', - ) - starting_replica_count = proto.Field( - proto.INT32, - number=2, - ) - max_replica_count = proto.Field( - proto.INT32, - number=3, - ) - - -class ResourcesConsumed(proto.Message): - r"""Statistics information about resource consumption. - - Attributes: - replica_hours (float): - Output only. The number of replica hours - used. Note that many replicas may run in - parallel, and additionally any given work may be - queued for some time. Therefore this value is - not strictly related to wall time. - """ - - replica_hours = proto.Field( - proto.DOUBLE, - number=1, - ) - - -class DiskSpec(proto.Message): - r"""Represents the spec of disk options. - - Attributes: - boot_disk_type (str): - Type of the boot disk (default is "pd-ssd"). - Valid values: "pd-ssd" (Persistent Disk Solid - State Drive) or "pd-standard" (Persistent Disk - Hard Disk Drive). - boot_disk_size_gb (int): - Size in GB of the boot disk (default is - 100GB). - """ - - boot_disk_type = proto.Field( - proto.STRING, - number=1, - ) - boot_disk_size_gb = proto.Field( - proto.INT32, - number=2, - ) - - -class AutoscalingMetricSpec(proto.Message): - r"""The metric specification that defines the target resource - utilization (CPU utilization, accelerator's duty cycle, and so - on) for calculating the desired replica count. - - Attributes: - metric_name (str): - Required. The resource metric name. Supported metrics: - - - For Online Prediction: - - ``aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle`` - - ``aiplatform.googleapis.com/prediction/online/cpu/utilization`` - target (int): - The target resource utilization in percentage - (1% - 100%) for the given metric; once the real - usage deviates from the target by a certain - percentage, the machine replicas change. The - default value is 60 (representing 60%) if not - provided. 
- """ - - metric_name = proto.Field( - proto.STRING, - number=1, - ) - target = proto.Field( - proto.INT32, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/manual_batch_tuning_parameters.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/manual_batch_tuning_parameters.py deleted file mode 100644 index 859646e199..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/manual_batch_tuning_parameters.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'ManualBatchTuningParameters', - }, -) - - -class ManualBatchTuningParameters(proto.Message): - r"""Manual batch tuning parameters. - - Attributes: - batch_size (int): - Immutable. The number of the records (e.g. - instances) of the operation given in each batch - to a machine replica. Machine type, and size of - a single record should be considered when - setting this parameter, higher value speeds up - the batch operation's execution, but too high - value will result in a whole batch not fitting - in a machine's memory, and the whole operation - will fail. - The default value is 4. 
- """ - - batch_size = proto.Field( - proto.INT32, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_schema.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_schema.py deleted file mode 100644 index 857139b3a7..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_schema.py +++ /dev/null @@ -1,96 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'MetadataSchema', - }, -) - - -class MetadataSchema(proto.Message): - r"""Instance of a general MetadataSchema. - - Attributes: - name (str): - Output only. The resource name of the - MetadataSchema. - schema_version (str): - The version of the MetadataSchema. The version's format must - match the following regular expression: - ``^[0-9]+[.][0-9]+[.][0-9]+$``, which would allow to - order/compare different versions. Example: 1.0.0, 1.0.1, - etc. - schema (str): - Required. The raw YAML string representation of the - MetadataSchema. The combination of [MetadataSchema.version] - and the schema name given by ``title`` in - [MetadataSchema.schema] must be unique within a - MetadataStore. 
- - The schema is defined as an OpenAPI 3.0.2 `MetadataSchema - Object `__ - schema_type (google.cloud.aiplatform_v1beta1.types.MetadataSchema.MetadataSchemaType): - The type of the MetadataSchema. This is a - property that identifies which metadata types - will use the MetadataSchema. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - MetadataSchema was created. - description (str): - Description of the Metadata Schema - """ - class MetadataSchemaType(proto.Enum): - r"""Describes the type of the MetadataSchema.""" - METADATA_SCHEMA_TYPE_UNSPECIFIED = 0 - ARTIFACT_TYPE = 1 - EXECUTION_TYPE = 2 - CONTEXT_TYPE = 3 - - name = proto.Field( - proto.STRING, - number=1, - ) - schema_version = proto.Field( - proto.STRING, - number=2, - ) - schema = proto.Field( - proto.STRING, - number=3, - ) - schema_type = proto.Field( - proto.ENUM, - number=4, - enum=MetadataSchemaType, - ) - create_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - description = proto.Field( - proto.STRING, - number=6, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_service.py deleted file mode 100644 index 0472cd56cd..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_service.py +++ /dev/null @@ -1,1479 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact -from google.cloud.aiplatform_v1beta1.types import context as gca_context -from google.cloud.aiplatform_v1beta1.types import event -from google.cloud.aiplatform_v1beta1.types import execution as gca_execution -from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema -from google.cloud.aiplatform_v1beta1.types import metadata_store as gca_metadata_store -from google.cloud.aiplatform_v1beta1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'CreateMetadataStoreRequest', - 'CreateMetadataStoreOperationMetadata', - 'GetMetadataStoreRequest', - 'ListMetadataStoresRequest', - 'ListMetadataStoresResponse', - 'DeleteMetadataStoreRequest', - 'DeleteMetadataStoreOperationMetadata', - 'CreateArtifactRequest', - 'GetArtifactRequest', - 'ListArtifactsRequest', - 'ListArtifactsResponse', - 'UpdateArtifactRequest', - 'DeleteArtifactRequest', - 'PurgeArtifactsRequest', - 'PurgeArtifactsResponse', - 'PurgeArtifactsMetadata', - 'CreateContextRequest', - 'GetContextRequest', - 'ListContextsRequest', - 'ListContextsResponse', - 'UpdateContextRequest', - 'DeleteContextRequest', - 'PurgeContextsRequest', - 'PurgeContextsResponse', - 'PurgeContextsMetadata', - 'AddContextArtifactsAndExecutionsRequest', - 'AddContextArtifactsAndExecutionsResponse', - 'AddContextChildrenRequest', - 'AddContextChildrenResponse', - 'QueryContextLineageSubgraphRequest', - 'CreateExecutionRequest', - 'GetExecutionRequest', - 'ListExecutionsRequest', - 'ListExecutionsResponse', - 'UpdateExecutionRequest', - 'DeleteExecutionRequest', - 'PurgeExecutionsRequest', - 'PurgeExecutionsResponse', - 'PurgeExecutionsMetadata', - 
'AddExecutionEventsRequest', - 'AddExecutionEventsResponse', - 'QueryExecutionInputsAndOutputsRequest', - 'CreateMetadataSchemaRequest', - 'GetMetadataSchemaRequest', - 'ListMetadataSchemasRequest', - 'ListMetadataSchemasResponse', - 'QueryArtifactLineageSubgraphRequest', - }, -) - - -class CreateMetadataStoreRequest(proto.Message): - r"""Request message for - [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore]. - - Attributes: - parent (str): - Required. The resource name of the Location where the - MetadataStore should be created. Format: - ``projects/{project}/locations/{location}/`` - metadata_store (google.cloud.aiplatform_v1beta1.types.MetadataStore): - Required. The MetadataStore to create. - metadata_store_id (str): - The {metadatastore} portion of the resource name with the - format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - If not provided, the MetadataStore's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. Valid characters are ``/[a-z][0-9]-/``. Must be - unique across all MetadataStores in the parent Location. - (Otherwise the request will fail with ALREADY_EXISTS, or - PERMISSION_DENIED if the caller can't view the preexisting - MetadataStore.) - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - metadata_store = proto.Field( - proto.MESSAGE, - number=2, - message=gca_metadata_store.MetadataStore, - ) - metadata_store_id = proto.Field( - proto.STRING, - number=3, - ) - - -class CreateMetadataStoreOperationMetadata(proto.Message): - r"""Details of operations that perform - [MetadataService.CreateMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for creating a - MetadataStore. 
- """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class GetMetadataStoreRequest(proto.Message): - r"""Request message for - [MetadataService.GetMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataStore]. - - Attributes: - name (str): - Required. The resource name of the MetadataStore to - retrieve. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListMetadataStoresRequest(proto.Message): - r"""Request message for - [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. - - Attributes: - parent (str): - Required. The Location whose MetadataStores should be - listed. Format: ``projects/{project}/locations/{location}`` - page_size (int): - The maximum number of Metadata Stores to - return. The service may return fewer. - Must be in range 1-1000, inclusive. Defaults to - 100. - page_token (str): - A page token, received from a previous - [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores] - call. Provide this to retrieve the subsequent page. - - When paginating, all other provided parameters must match - the call that provided the page token. (Otherwise the - request will fail with INVALID_ARGUMENT error.) - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - - -class ListMetadataStoresResponse(proto.Message): - r"""Response message for - [MetadataService.ListMetadataStores][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores]. - - Attributes: - metadata_stores (Sequence[google.cloud.aiplatform_v1beta1.types.MetadataStore]): - The MetadataStores found for the Location. 
- next_page_token (str): - A token, which can be sent as - [ListMetadataStoresRequest.page_token][google.cloud.aiplatform.v1beta1.ListMetadataStoresRequest.page_token] - to retrieve the next page. If this field is not populated, - there are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - metadata_stores = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_metadata_store.MetadataStore, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteMetadataStoreRequest(proto.Message): - r"""Request message for - [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore]. - - Attributes: - name (str): - Required. The resource name of the MetadataStore to delete. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - force (bool): - Deprecated: Field is no longer supported. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - force = proto.Field( - proto.BOOL, - number=2, - ) - - -class DeleteMetadataStoreOperationMetadata(proto.Message): - r"""Details of operations that perform - [MetadataService.DeleteMetadataStore][google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for deleting a - MetadataStore. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class CreateArtifactRequest(proto.Message): - r"""Request message for - [MetadataService.CreateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.CreateArtifact]. - - Attributes: - parent (str): - Required. The resource name of the MetadataStore where the - Artifact should be created. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - artifact (google.cloud.aiplatform_v1beta1.types.Artifact): - Required. 
The Artifact to create. - artifact_id (str): - The {artifact} portion of the resource name with the format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - If not provided, the Artifact's ID will be a UUID generated - by the service. Must be 4-128 characters in length. Valid - characters are ``/[a-z][0-9]-/``. Must be unique across all - Artifacts in the parent MetadataStore. (Otherwise the - request will fail with ALREADY_EXISTS, or PERMISSION_DENIED - if the caller can't view the preexisting Artifact.) - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - artifact = proto.Field( - proto.MESSAGE, - number=2, - message=gca_artifact.Artifact, - ) - artifact_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetArtifactRequest(proto.Message): - r"""Request message for - [MetadataService.GetArtifact][google.cloud.aiplatform.v1beta1.MetadataService.GetArtifact]. - - Attributes: - name (str): - Required. The resource name of the Artifact to retrieve. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListArtifactsRequest(proto.Message): - r"""Request message for - [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. - - Attributes: - parent (str): - Required. The MetadataStore whose Artifacts should be - listed. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - page_size (int): - The maximum number of Artifacts to return. - The service may return fewer. Must be in range - 1-1000, inclusive. Defaults to 100. - page_token (str): - A page token, received from a previous - [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts] - call. Provide this to retrieve the subsequent page. 
- - When paginating, all other provided parameters must match - the call that provided the page token. (Otherwise the - request will fail with INVALID_ARGUMENT error.) - filter (str): - Filter specifying the boolean condition for the Artifacts to - satisfy in order to be part of the result set. The syntax to - define filter query is based on https://google.aip.dev/160. - The supported set of filters include the following: - - - **Attribute filtering**: For example: - ``display_name = "test"``. Supported fields include: - ``name``, ``display_name``, ``uri``, ``state``, - ``schema_title``, ``create_time``, and ``update_time``. - Time fields, such as ``create_time`` and ``update_time``, - require values specified in RFC-3339 format. For example: - ``create_time = "2020-11-19T11:30:00-04:00"`` - - **Metadata field**: To filter on metadata fields use - traversal operation as follows: - ``metadata..``. For example: - ``metadata.field_1.number_value = 10.0`` - - **Context based filtering**: To filter Artifacts based on - the contexts to which they belong, use the function - operator with the full resource name - ``in_context()``. For example: - ``in_context("projects//locations//metadataStores//contexts/")`` - - Each of the above supported filter types can be combined - together using logical operators (``AND`` & ``OR``). - - For example: - ``display_name = "test" AND metadata.field1.bool_value = true``. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - filter = proto.Field( - proto.STRING, - number=4, - ) - - -class ListArtifactsResponse(proto.Message): - r"""Response message for - [MetadataService.ListArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts]. - - Attributes: - artifacts (Sequence[google.cloud.aiplatform_v1beta1.types.Artifact]): - The Artifacts retrieved from the - MetadataStore. 
- next_page_token (str): - A token, which can be sent as - [ListArtifactsRequest.page_token][google.cloud.aiplatform.v1beta1.ListArtifactsRequest.page_token] - to retrieve the next page. If this field is not populated, - there are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - artifacts = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_artifact.Artifact, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateArtifactRequest(proto.Message): - r"""Request message for - [MetadataService.UpdateArtifact][google.cloud.aiplatform.v1beta1.MetadataService.UpdateArtifact]. - - Attributes: - artifact (google.cloud.aiplatform_v1beta1.types.Artifact): - Required. The Artifact containing updates. The Artifact's - [Artifact.name][google.cloud.aiplatform.v1beta1.Artifact.name] - field is used to identify the Artifact to be updated. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A FieldMask indicating which fields - should be updated. Functionality of this field - is not yet supported. - allow_missing (bool): - If set to true, and the - [Artifact][google.cloud.aiplatform.v1beta1.Artifact] is not - found, a new - [Artifact][google.cloud.aiplatform.v1beta1.Artifact] is - created. - """ - - artifact = proto.Field( - proto.MESSAGE, - number=1, - message=gca_artifact.Artifact, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - allow_missing = proto.Field( - proto.BOOL, - number=3, - ) - - -class DeleteArtifactRequest(proto.Message): - r"""Request message for - [MetadataService.DeleteArtifact][google.cloud.aiplatform.v1beta1.MetadataService.DeleteArtifact]. - - Attributes: - name (str): - Required. The resource name of the Artifact to delete. 
- Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - etag (str): - Optional. The etag of the Artifact to delete. If this is - provided, it must match the server's etag. Otherwise, the - request will fail with a FAILED_PRECONDITION. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - etag = proto.Field( - proto.STRING, - number=2, - ) - - -class PurgeArtifactsRequest(proto.Message): - r"""Request message for - [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts]. - - Attributes: - parent (str): - Required. The metadata store to purge Artifacts from. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - filter (str): - Required. A required filter matching the Artifacts to be - purged. E.g., ``update_time <= 2020-11-19T11:30:00-04:00``. - force (bool): - Optional. Flag to indicate to actually perform the purge. If - ``force`` is set to false, the method will return a sample - of Artifact names that would be deleted. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - force = proto.Field( - proto.BOOL, - number=3, - ) - - -class PurgeArtifactsResponse(proto.Message): - r"""Response message for - [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts]. - - Attributes: - purge_count (int): - The number of Artifacts that this request deleted (or, if - ``force`` is false, the number of Artifacts that will be - deleted). This can be an estimate. - purge_sample (Sequence[str]): - A sample of the Artifact names that will be deleted. Only - populated if ``force`` is set to false. The maximum number - of samples is 100 (it is possible to return fewer). 
- """ - - purge_count = proto.Field( - proto.INT64, - number=1, - ) - purge_sample = proto.RepeatedField( - proto.STRING, - number=2, - ) - - -class PurgeArtifactsMetadata(proto.Message): - r"""Details of operations that perform - [MetadataService.PurgeArtifacts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for purging Artifacts. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class CreateContextRequest(proto.Message): - r"""Request message for - [MetadataService.CreateContext][google.cloud.aiplatform.v1beta1.MetadataService.CreateContext]. - - Attributes: - parent (str): - Required. The resource name of the MetadataStore where the - Context should be created. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - context (google.cloud.aiplatform_v1beta1.types.Context): - Required. The Context to create. - context_id (str): - The {context} portion of the resource name with the format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}``. - If not provided, the Context's ID will be a UUID generated - by the service. Must be 4-128 characters in length. Valid - characters are ``/[a-z][0-9]-/``. Must be unique across all - Contexts in the parent MetadataStore. (Otherwise the request - will fail with ALREADY_EXISTS, or PERMISSION_DENIED if the - caller can't view the preexisting Context.) - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - context = proto.Field( - proto.MESSAGE, - number=2, - message=gca_context.Context, - ) - context_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetContextRequest(proto.Message): - r"""Request message for - [MetadataService.GetContext][google.cloud.aiplatform.v1beta1.MetadataService.GetContext]. 
- - Attributes: - name (str): - Required. The resource name of the Context to retrieve. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListContextsRequest(proto.Message): - r"""Request message for - [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts] - - Attributes: - parent (str): - Required. The MetadataStore whose Contexts should be listed. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - page_size (int): - The maximum number of Contexts to return. The - service may return fewer. Must be in range - 1-1000, inclusive. Defaults to 100. - page_token (str): - A page token, received from a previous - [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts] - call. Provide this to retrieve the subsequent page. - - When paginating, all other provided parameters must match - the call that provided the page token. (Otherwise the - request will fail with INVALID_ARGUMENT error.) - filter (str): - Filter specifying the boolean condition for the Contexts to - satisfy in order to be part of the result set. The syntax to - define filter query is based on https://google.aip.dev/160. - Following are the supported set of filters: - - - **Attribute filtering**: For example: - ``display_name = "test"``. Supported fields include: - ``name``, ``display_name``, ``schema_title``, - ``create_time``, and ``update_time``. Time fields, such - as ``create_time`` and ``update_time``, require values - specified in RFC-3339 format. For example: - ``create_time = "2020-11-19T11:30:00-04:00"``. - - - **Metadata field**: To filter on metadata fields use - traversal operation as follows: - ``metadata..``. For example: - ``metadata.field_1.number_value = 10.0``. 
- - - **Parent Child filtering**: To filter Contexts based on - parent-child relationship use the HAS operator as - follows: - - :: - - parent_contexts: - "projects//locations//metadataStores//contexts/" - child_contexts: - "projects//locations//metadataStores//contexts/" - - Each of the above supported filters can be combined together - using logical operators (``AND`` & ``OR``). - - For example: - ``display_name = "test" AND metadata.field1.bool_value = true``. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - filter = proto.Field( - proto.STRING, - number=4, - ) - - -class ListContextsResponse(proto.Message): - r"""Response message for - [MetadataService.ListContexts][google.cloud.aiplatform.v1beta1.MetadataService.ListContexts]. - - Attributes: - contexts (Sequence[google.cloud.aiplatform_v1beta1.types.Context]): - The Contexts retrieved from the - MetadataStore. - next_page_token (str): - A token, which can be sent as - [ListContextsRequest.page_token][google.cloud.aiplatform.v1beta1.ListContextsRequest.page_token] - to retrieve the next page. If this field is not populated, - there are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - contexts = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_context.Context, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateContextRequest(proto.Message): - r"""Request message for - [MetadataService.UpdateContext][google.cloud.aiplatform.v1beta1.MetadataService.UpdateContext]. - - Attributes: - context (google.cloud.aiplatform_v1beta1.types.Context): - Required. The Context containing updates. The Context's - [Context.name][google.cloud.aiplatform.v1beta1.Context.name] - field is used to identify the Context to be updated. 
Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A FieldMask indicating which fields - should be updated. Functionality of this field - is not yet supported. - allow_missing (bool): - If set to true, and the - [Context][google.cloud.aiplatform.v1beta1.Context] is not - found, a new - [Context][google.cloud.aiplatform.v1beta1.Context] is - created. - """ - - context = proto.Field( - proto.MESSAGE, - number=1, - message=gca_context.Context, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - allow_missing = proto.Field( - proto.BOOL, - number=3, - ) - - -class DeleteContextRequest(proto.Message): - r"""Request message for - [MetadataService.DeleteContext][google.cloud.aiplatform.v1beta1.MetadataService.DeleteContext]. - - Attributes: - name (str): - Required. The resource name of the Context to delete. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - force (bool): - The force deletion semantics is still - undefined. Users should not use this field. - etag (str): - Optional. The etag of the Context to delete. If this is - provided, it must match the server's etag. Otherwise, the - request will fail with a FAILED_PRECONDITION. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - force = proto.Field( - proto.BOOL, - number=2, - ) - etag = proto.Field( - proto.STRING, - number=3, - ) - - -class PurgeContextsRequest(proto.Message): - r"""Request message for - [MetadataService.PurgeContexts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts]. - - Attributes: - parent (str): - Required. The metadata store to purge Contexts from. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - filter (str): - Required. A required filter matching the Contexts to be - purged. 
E.g., ``update_time <= 2020-11-19T11:30:00-04:00``. - force (bool): - Optional. Flag to indicate to actually perform the purge. If - ``force`` is set to false, the method will return a sample - of Context names that would be deleted. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - force = proto.Field( - proto.BOOL, - number=3, - ) - - -class PurgeContextsResponse(proto.Message): - r"""Response message for - [MetadataService.PurgeContexts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts]. - - Attributes: - purge_count (int): - The number of Contexts that this request deleted (or, if - ``force`` is false, the number of Contexts that will be - deleted). This can be an estimate. - purge_sample (Sequence[str]): - A sample of the Context names that will be deleted. Only - populated if ``force`` is set to false. The maximum number - of samples is 100 (it is possible to return fewer). - """ - - purge_count = proto.Field( - proto.INT64, - number=1, - ) - purge_sample = proto.RepeatedField( - proto.STRING, - number=2, - ) - - -class PurgeContextsMetadata(proto.Message): - r"""Details of operations that perform - [MetadataService.PurgeContexts][google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for purging Contexts. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class AddContextArtifactsAndExecutionsRequest(proto.Message): - r"""Request message for - [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. - - Attributes: - context (str): - Required. The resource name of the Context that the - Artifacts and Executions belong to. 
Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - artifacts (Sequence[str]): - The resource names of the Artifacts to attribute to the - Context. - - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - executions (Sequence[str]): - The resource names of the Executions to associate with the - Context. - - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - """ - - context = proto.Field( - proto.STRING, - number=1, - ) - artifacts = proto.RepeatedField( - proto.STRING, - number=2, - ) - executions = proto.RepeatedField( - proto.STRING, - number=3, - ) - - -class AddContextArtifactsAndExecutionsResponse(proto.Message): - r"""Response message for - [MetadataService.AddContextArtifactsAndExecutions][google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions]. - - """ - - -class AddContextChildrenRequest(proto.Message): - r"""Request message for - [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. - - Attributes: - context (str): - Required. The resource name of the parent Context. - - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - child_contexts (Sequence[str]): - The resource names of the child Contexts. - """ - - context = proto.Field( - proto.STRING, - number=1, - ) - child_contexts = proto.RepeatedField( - proto.STRING, - number=2, - ) - - -class AddContextChildrenResponse(proto.Message): - r"""Response message for - [MetadataService.AddContextChildren][google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren]. - - """ - - -class QueryContextLineageSubgraphRequest(proto.Message): - r"""Request message for - [MetadataService.QueryContextLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryContextLineageSubgraph]. 
- - Attributes: - context (str): - Required. The resource name of the Context whose Artifacts - and Executions should be retrieved as a LineageSubgraph. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`` - - The request may error with FAILED_PRECONDITION if the number - of Artifacts, the number of Executions, or the number of - Events that would be returned for the Context exceeds 1000. - """ - - context = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateExecutionRequest(proto.Message): - r"""Request message for - [MetadataService.CreateExecution][google.cloud.aiplatform.v1beta1.MetadataService.CreateExecution]. - - Attributes: - parent (str): - Required. The resource name of the MetadataStore where the - Execution should be created. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - execution (google.cloud.aiplatform_v1beta1.types.Execution): - Required. The Execution to create. - execution_id (str): - The {execution} portion of the resource name with the - format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - If not provided, the Execution's ID will be a UUID generated - by the service. Must be 4-128 characters in length. Valid - characters are ``/[a-z][0-9]-/``. Must be unique across all - Executions in the parent MetadataStore. (Otherwise the - request will fail with ALREADY_EXISTS, or PERMISSION_DENIED - if the caller can't view the preexisting Execution.) - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - execution = proto.Field( - proto.MESSAGE, - number=2, - message=gca_execution.Execution, - ) - execution_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetExecutionRequest(proto.Message): - r"""Request message for - [MetadataService.GetExecution][google.cloud.aiplatform.v1beta1.MetadataService.GetExecution]. - - Attributes: - name (str): - Required. 
The resource name of the Execution to retrieve. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListExecutionsRequest(proto.Message): - r"""Request message for - [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. - - Attributes: - parent (str): - Required. The MetadataStore whose Executions should be - listed. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - page_size (int): - The maximum number of Executions to return. - The service may return fewer. Must be in range - 1-1000, inclusive. Defaults to 100. - page_token (str): - A page token, received from a previous - [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions] - call. Provide this to retrieve the subsequent page. - - When paginating, all other provided parameters must match - the call that provided the page token. (Otherwise the - request will fail with an INVALID_ARGUMENT error.) - filter (str): - Filter specifying the boolean condition for the Executions - to satisfy in order to be part of the result set. The syntax - to define filter query is based on - https://google.aip.dev/160. Following are the supported set - of filters: - - - **Attribute filtering**: For example: - ``display_name = "test"``. Supported fields include: - ``name``, ``display_name``, ``state``, ``schema_title``, - ``create_time``, and ``update_time``. Time fields, such - as ``create_time`` and ``update_time``, require values - specified in RFC-3339 format. For example: - ``create_time = "2020-11-19T11:30:00-04:00"``. 
- - **Metadata field**: To filter on metadata fields use - traversal operation as follows: - ``metadata..`` For example: - ``metadata.field_1.number_value = 10.0`` - - **Context based filtering**: To filter Executions based - on the contexts to which they belong use the function - operator with the full resource name: - ``in_context()``. For example: - ``in_context("projects//locations//metadataStores//contexts/")`` - - Each of the above supported filters can be combined together - using logical operators (``AND`` & ``OR``). For example: - ``display_name = "test" AND metadata.field1.bool_value = true``. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - filter = proto.Field( - proto.STRING, - number=4, - ) - - -class ListExecutionsResponse(proto.Message): - r"""Response message for - [MetadataService.ListExecutions][google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions]. - - Attributes: - executions (Sequence[google.cloud.aiplatform_v1beta1.types.Execution]): - The Executions retrieved from the - MetadataStore. - next_page_token (str): - A token, which can be sent as - [ListExecutionsRequest.page_token][google.cloud.aiplatform.v1beta1.ListExecutionsRequest.page_token] - to retrieve the next page. If this field is not populated, - there are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - executions = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_execution.Execution, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateExecutionRequest(proto.Message): - r"""Request message for - [MetadataService.UpdateExecution][google.cloud.aiplatform.v1beta1.MetadataService.UpdateExecution]. - - Attributes: - execution (google.cloud.aiplatform_v1beta1.types.Execution): - Required. The Execution containing updates. 
The Execution's - [Execution.name][google.cloud.aiplatform.v1beta1.Execution.name] - field is used to identify the Execution to be updated. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A FieldMask indicating which fields - should be updated. Functionality of this field - is not yet supported. - allow_missing (bool): - If set to true, and the - [Execution][google.cloud.aiplatform.v1beta1.Execution] is - not found, a new - [Execution][google.cloud.aiplatform.v1beta1.Execution] is - created. - """ - - execution = proto.Field( - proto.MESSAGE, - number=1, - message=gca_execution.Execution, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - allow_missing = proto.Field( - proto.BOOL, - number=3, - ) - - -class DeleteExecutionRequest(proto.Message): - r"""Request message for - [MetadataService.DeleteExecution][google.cloud.aiplatform.v1beta1.MetadataService.DeleteExecution]. - - Attributes: - name (str): - Required. The resource name of the Execution to delete. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - etag (str): - Optional. The etag of the Execution to delete. If this is - provided, it must match the server's etag. Otherwise, the - request will fail with a FAILED_PRECONDITION. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - etag = proto.Field( - proto.STRING, - number=2, - ) - - -class PurgeExecutionsRequest(proto.Message): - r"""Request message for - [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions]. - - Attributes: - parent (str): - Required. The metadata store to purge Executions from. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - filter (str): - Required. 
A required filter matching the Executions to be - purged. E.g., ``update_time <= 2020-11-19T11:30:00-04:00``. - force (bool): - Optional. Flag to indicate to actually perform the purge. If - ``force`` is set to false, the method will return a sample - of Execution names that would be deleted. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - force = proto.Field( - proto.BOOL, - number=3, - ) - - -class PurgeExecutionsResponse(proto.Message): - r"""Response message for - [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions]. - - Attributes: - purge_count (int): - The number of Executions that this request deleted (or, if - ``force`` is false, the number of Executions that will be - deleted). This can be an estimate. - purge_sample (Sequence[str]): - A sample of the Execution names that will be deleted. Only - populated if ``force`` is set to false. The maximum number - of samples is 100 (it is possible to return fewer). - """ - - purge_count = proto.Field( - proto.INT64, - number=1, - ) - purge_sample = proto.RepeatedField( - proto.STRING, - number=2, - ) - - -class PurgeExecutionsMetadata(proto.Message): - r"""Details of operations that perform - [MetadataService.PurgeExecutions][google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for purging Executions. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class AddExecutionEventsRequest(proto.Message): - r"""Request message for - [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. - - Attributes: - execution (str): - Required. The resource name of the Execution that the Events - connect Artifacts with. 
Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - events (Sequence[google.cloud.aiplatform_v1beta1.types.Event]): - The Events to create and add. - """ - - execution = proto.Field( - proto.STRING, - number=1, - ) - events = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=event.Event, - ) - - -class AddExecutionEventsResponse(proto.Message): - r"""Response message for - [MetadataService.AddExecutionEvents][google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents]. - - """ - - -class QueryExecutionInputsAndOutputsRequest(proto.Message): - r"""Request message for - [MetadataService.QueryExecutionInputsAndOutputs][google.cloud.aiplatform.v1beta1.MetadataService.QueryExecutionInputsAndOutputs]. - - Attributes: - execution (str): - Required. The resource name of the Execution whose input and - output Artifacts should be retrieved as a LineageSubgraph. - Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}`` - """ - - execution = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateMetadataSchemaRequest(proto.Message): - r"""Request message for - [MetadataService.CreateMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataSchema]. - - Attributes: - parent (str): - Required. The resource name of the MetadataStore where the - MetadataSchema should be created. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - metadata_schema (google.cloud.aiplatform_v1beta1.types.MetadataSchema): - Required. The MetadataSchema to create. - metadata_schema_id (str): - The {metadata_schema} portion of the resource name with the - format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` - If not provided, the MetadataStore's ID will be a UUID - generated by the service. Must be 4-128 characters in - length. 
Valid characters are ``/[a-z][0-9]-/``. Must be - unique across all MetadataSchemas in the parent Location. - (Otherwise the request will fail with ALREADY_EXISTS, or - PERMISSION_DENIED if the caller can't view the preexisting - MetadataSchema.) - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - metadata_schema = proto.Field( - proto.MESSAGE, - number=2, - message=gca_metadata_schema.MetadataSchema, - ) - metadata_schema_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetMetadataSchemaRequest(proto.Message): - r"""Request message for - [MetadataService.GetMetadataSchema][google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataSchema]. - - Attributes: - name (str): - Required. The resource name of the MetadataSchema to - retrieve. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListMetadataSchemasRequest(proto.Message): - r"""Request message for - [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. - - Attributes: - parent (str): - Required. The MetadataStore whose MetadataSchemas should be - listed. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}`` - page_size (int): - The maximum number of MetadataSchemas to - return. The service may return fewer. - Must be in range 1-1000, inclusive. Defaults to - 100. - page_token (str): - A page token, received from a previous - [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas] - call. Provide this to retrieve the next page. - - When paginating, all other provided parameters must match - the call that provided the page token. (Otherwise the - request will fail with INVALID_ARGUMENT error.) - filter (str): - A query to filter available MetadataSchemas - for matching results. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - filter = proto.Field( - proto.STRING, - number=4, - ) - - -class ListMetadataSchemasResponse(proto.Message): - r"""Response message for - [MetadataService.ListMetadataSchemas][google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas]. - - Attributes: - metadata_schemas (Sequence[google.cloud.aiplatform_v1beta1.types.MetadataSchema]): - The MetadataSchemas found for the - MetadataStore. - next_page_token (str): - A token, which can be sent as - [ListMetadataSchemasRequest.page_token][google.cloud.aiplatform.v1beta1.ListMetadataSchemasRequest.page_token] - to retrieve the next page. If this field is not populated, - there are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - metadata_schemas = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_metadata_schema.MetadataSchema, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class QueryArtifactLineageSubgraphRequest(proto.Message): - r"""Request message for - [MetadataService.QueryArtifactLineageSubgraph][google.cloud.aiplatform.v1beta1.MetadataService.QueryArtifactLineageSubgraph]. - - Attributes: - artifact (str): - Required. The resource name of the Artifact whose Lineage - needs to be retrieved as a LineageSubgraph. Format: - ``projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}`` - - The request may error with FAILED_PRECONDITION if the number - of Artifacts, the number of Executions, or the number of - Events that would be returned for the Context exceeds 1000. - max_hops (int): - Specifies the size of the lineage graph in terms of number - of hops from the specified artifact. Negative Value: - INVALID_ARGUMENT error is returned 0: Only input artifact is - returned. 
No value: Transitive closure is performed to - return the complete graph. - filter (str): - Filter specifying the boolean condition for the Artifacts to - satisfy in order to be part of the Lineage Subgraph. The - syntax to define filter query is based on - https://google.aip.dev/160. The supported set of filters - include the following: - - - **Attribute filtering**: For example: - ``display_name = "test"`` Supported fields include: - ``name``, ``display_name``, ``uri``, ``state``, - ``schema_title``, ``create_time``, and ``update_time``. - Time fields, such as ``create_time`` and ``update_time``, - require values specified in RFC-3339 format. For example: - ``create_time = "2020-11-19T11:30:00-04:00"`` - - **Metadata field**: To filter on metadata fields use - traversal operation as follows: - ``metadata..``. For example: - ``metadata.field_1.number_value = 10.0`` - - Each of the above supported filter types can be combined - together using logical operators (``AND`` & ``OR``). - - For example: - ``display_name = "test" AND metadata.field1.bool_value = true``. - """ - - artifact = proto.Field( - proto.STRING, - number=1, - ) - max_hops = proto.Field( - proto.INT32, - number=2, - ) - filter = proto.Field( - proto.STRING, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_store.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_store.py deleted file mode 100644 index a0897f3704..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/metadata_store.py +++ /dev/null @@ -1,100 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'MetadataStore', - }, -) - - -class MetadataStore(proto.Message): - r"""Instance of a metadata store. Contains a set of metadata that - can be queried. - - Attributes: - name (str): - Output only. The resource name of the - MetadataStore instance. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - MetadataStore was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - MetadataStore was last updated. - encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): - Customer-managed encryption key spec for a - Metadata Store. If set, this Metadata Store and - all sub-resources of this Metadata Store are - secured using this key. - description (str): - Description of the MetadataStore. - state (google.cloud.aiplatform_v1beta1.types.MetadataStore.MetadataStoreState): - Output only. State information of the - MetadataStore. - """ - - class MetadataStoreState(proto.Message): - r"""Represents state information for a MetadataStore. - - Attributes: - disk_utilization_bytes (int): - The disk utilization of the MetadataStore in - bytes. 
- """ - - disk_utilization_bytes = proto.Field( - proto.INT64, - number=1, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - create_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=5, - message=gca_encryption_spec.EncryptionSpec, - ) - description = proto.Field( - proto.STRING, - number=6, - ) - state = proto.Field( - proto.MESSAGE, - number=7, - message=MetadataStoreState, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migratable_resource.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migratable_resource.py deleted file mode 100644 index 3ac5f704e4..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migratable_resource.py +++ /dev/null @@ -1,228 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'MigratableResource', - }, -) - - -class MigratableResource(proto.Message): - r"""Represents one resource that exists in automl.googleapis.com, - datalabeling.googleapis.com or ml.googleapis.com. 
- - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - ml_engine_model_version (google.cloud.aiplatform_v1beta1.types.MigratableResource.MlEngineModelVersion): - Output only. Represents one Version in - ml.googleapis.com. - - This field is a member of `oneof`_ ``resource``. - automl_model (google.cloud.aiplatform_v1beta1.types.MigratableResource.AutomlModel): - Output only. Represents one Model in - automl.googleapis.com. - - This field is a member of `oneof`_ ``resource``. - automl_dataset (google.cloud.aiplatform_v1beta1.types.MigratableResource.AutomlDataset): - Output only. Represents one Dataset in - automl.googleapis.com. - - This field is a member of `oneof`_ ``resource``. - data_labeling_dataset (google.cloud.aiplatform_v1beta1.types.MigratableResource.DataLabelingDataset): - Output only. Represents one Dataset in - datalabeling.googleapis.com. - - This field is a member of `oneof`_ ``resource``. - last_migrate_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when the last - migration attempt on this MigratableResource - started. Will not be set if there's no migration - attempt on this MigratableResource. - last_update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - MigratableResource was last updated. - """ - - class MlEngineModelVersion(proto.Message): - r"""Represents one model Version in ml.googleapis.com. - - Attributes: - endpoint (str): - The ml.googleapis.com endpoint that this model Version - currently lives in. 
Example values: - - - ml.googleapis.com - - us-centrall-ml.googleapis.com - - europe-west4-ml.googleapis.com - - asia-east1-ml.googleapis.com - version (str): - Full resource name of ml engine model Version. Format: - ``projects/{project}/models/{model}/versions/{version}``. - """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - version = proto.Field( - proto.STRING, - number=2, - ) - - class AutomlModel(proto.Message): - r"""Represents one Model in automl.googleapis.com. - - Attributes: - model (str): - Full resource name of automl Model. Format: - ``projects/{project}/locations/{location}/models/{model}``. - model_display_name (str): - The Model's display name in - automl.googleapis.com. - """ - - model = proto.Field( - proto.STRING, - number=1, - ) - model_display_name = proto.Field( - proto.STRING, - number=3, - ) - - class AutomlDataset(proto.Message): - r"""Represents one Dataset in automl.googleapis.com. - - Attributes: - dataset (str): - Full resource name of automl Dataset. Format: - ``projects/{project}/locations/{location}/datasets/{dataset}``. - dataset_display_name (str): - The Dataset's display name in - automl.googleapis.com. - """ - - dataset = proto.Field( - proto.STRING, - number=1, - ) - dataset_display_name = proto.Field( - proto.STRING, - number=4, - ) - - class DataLabelingDataset(proto.Message): - r"""Represents one Dataset in datalabeling.googleapis.com. - - Attributes: - dataset (str): - Full resource name of data labeling Dataset. Format: - ``projects/{project}/datasets/{dataset}``. - dataset_display_name (str): - The Dataset's display name in - datalabeling.googleapis.com. - data_labeling_annotated_datasets (Sequence[google.cloud.aiplatform_v1beta1.types.MigratableResource.DataLabelingDataset.DataLabelingAnnotatedDataset]): - The migratable AnnotatedDataset in - datalabeling.googleapis.com belongs to the data - labeling Dataset. 
- """ - - class DataLabelingAnnotatedDataset(proto.Message): - r"""Represents one AnnotatedDataset in - datalabeling.googleapis.com. - - Attributes: - annotated_dataset (str): - Full resource name of data labeling AnnotatedDataset. - Format: - ``projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}``. - annotated_dataset_display_name (str): - The AnnotatedDataset's display name in - datalabeling.googleapis.com. - """ - - annotated_dataset = proto.Field( - proto.STRING, - number=1, - ) - annotated_dataset_display_name = proto.Field( - proto.STRING, - number=3, - ) - - dataset = proto.Field( - proto.STRING, - number=1, - ) - dataset_display_name = proto.Field( - proto.STRING, - number=4, - ) - data_labeling_annotated_datasets = proto.RepeatedField( - proto.MESSAGE, - number=3, - message='MigratableResource.DataLabelingDataset.DataLabelingAnnotatedDataset', - ) - - ml_engine_model_version = proto.Field( - proto.MESSAGE, - number=1, - oneof='resource', - message=MlEngineModelVersion, - ) - automl_model = proto.Field( - proto.MESSAGE, - number=2, - oneof='resource', - message=AutomlModel, - ) - automl_dataset = proto.Field( - proto.MESSAGE, - number=3, - oneof='resource', - message=AutomlDataset, - ) - data_labeling_dataset = proto.Field( - proto.MESSAGE, - number=4, - oneof='resource', - message=DataLabelingDataset, - ) - last_migrate_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - last_update_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migration_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migration_service.py deleted file mode 100644 index 5c919c4f73..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/migration_service.py +++ /dev/null @@ -1,479 +0,0 @@ -# -*- coding: utf-8 -*- 
-# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import migratable_resource as gca_migratable_resource -from google.cloud.aiplatform_v1beta1.types import operation -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'SearchMigratableResourcesRequest', - 'SearchMigratableResourcesResponse', - 'BatchMigrateResourcesRequest', - 'MigrateResourceRequest', - 'BatchMigrateResourcesResponse', - 'MigrateResourceResponse', - 'BatchMigrateResourcesOperationMetadata', - }, -) - - -class SearchMigratableResourcesRequest(proto.Message): - r"""Request message for - [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. - - Attributes: - parent (str): - Required. The location that the migratable resources should - be searched from. It's the Vertex AI location that the - resources can be migrated to, not the resources' original - location. Format: - ``projects/{project}/locations/{location}`` - page_size (int): - The standard page size. - The default and maximum value is 100. - page_token (str): - The standard page token. - filter (str): - A filter for your search. You can use the following types of - filters: - - - Resource type filters. 
The following strings filter for a - specific type of - [MigratableResource][google.cloud.aiplatform.v1beta1.MigratableResource]: - - - ``ml_engine_model_version:*`` - - ``automl_model:*`` - - ``automl_dataset:*`` - - ``data_labeling_dataset:*`` - - - "Migrated or not" filters. The following strings filter - for resources that either have or have not already been - migrated: - - - ``last_migrate_time:*`` filters for migrated - resources. - - ``NOT last_migrate_time:*`` filters for not yet - migrated resources. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - filter = proto.Field( - proto.STRING, - number=4, - ) - - -class SearchMigratableResourcesResponse(proto.Message): - r"""Response message for - [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. - - Attributes: - migratable_resources (Sequence[google.cloud.aiplatform_v1beta1.types.MigratableResource]): - All migratable resources that can be migrated - to the location specified in the request. - next_page_token (str): - The standard next-page token. The migratable_resources may - not fill page_size in SearchMigratableResourcesRequest even - when there are subsequent pages. - """ - - @property - def raw_page(self): - return self - - migratable_resources = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_migratable_resource.MigratableResource, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class BatchMigrateResourcesRequest(proto.Message): - r"""Request message for - [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. - - Attributes: - parent (str): - Required. The location of the migrated resource will live - in. 
Format: ``projects/{project}/locations/{location}`` - migrate_resource_requests (Sequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest]): - Required. The request messages specifying the - resources to migrate. They must be in the same - location as the destination. Up to 50 resources - can be migrated in one batch. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - migrate_resource_requests = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='MigrateResourceRequest', - ) - - -class MigrateResourceRequest(proto.Message): - r"""Config of migrating one resource from automl.googleapis.com, - datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - migrate_ml_engine_model_version_config (google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest.MigrateMlEngineModelVersionConfig): - Config for migrating Version in - ml.googleapis.com to Vertex AI's Model. - - This field is a member of `oneof`_ ``request``. - migrate_automl_model_config (google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest.MigrateAutomlModelConfig): - Config for migrating Model in - automl.googleapis.com to Vertex AI's Model. - - This field is a member of `oneof`_ ``request``. - migrate_automl_dataset_config (google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest.MigrateAutomlDatasetConfig): - Config for migrating Dataset in - automl.googleapis.com to Vertex AI's Dataset. - - This field is a member of `oneof`_ ``request``. 
- migrate_data_labeling_dataset_config (google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest.MigrateDataLabelingDatasetConfig): - Config for migrating Dataset in - datalabeling.googleapis.com to Vertex AI's - Dataset. - - This field is a member of `oneof`_ ``request``. - """ - - class MigrateMlEngineModelVersionConfig(proto.Message): - r"""Config for migrating version in ml.googleapis.com to Vertex - AI's Model. - - Attributes: - endpoint (str): - Required. The ml.googleapis.com endpoint that this model - version should be migrated from. Example values: - - - ml.googleapis.com - - - us-centrall-ml.googleapis.com - - - europe-west4-ml.googleapis.com - - - asia-east1-ml.googleapis.com - model_version (str): - Required. Full resource name of ml engine model version. - Format: - ``projects/{project}/models/{model}/versions/{version}``. - model_display_name (str): - Required. Display name of the model in Vertex - AI. System will pick a display name if - unspecified. - """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - model_version = proto.Field( - proto.STRING, - number=2, - ) - model_display_name = proto.Field( - proto.STRING, - number=3, - ) - - class MigrateAutomlModelConfig(proto.Message): - r"""Config for migrating Model in automl.googleapis.com to Vertex - AI's Model. - - Attributes: - model (str): - Required. Full resource name of automl Model. Format: - ``projects/{project}/locations/{location}/models/{model}``. - model_display_name (str): - Optional. Display name of the model in Vertex - AI. System will pick a display name if - unspecified. - """ - - model = proto.Field( - proto.STRING, - number=1, - ) - model_display_name = proto.Field( - proto.STRING, - number=2, - ) - - class MigrateAutomlDatasetConfig(proto.Message): - r"""Config for migrating Dataset in automl.googleapis.com to - Vertex AI's Dataset. - - Attributes: - dataset (str): - Required. Full resource name of automl Dataset. 
Format: - ``projects/{project}/locations/{location}/datasets/{dataset}``. - dataset_display_name (str): - Required. Display name of the Dataset in - Vertex AI. System will pick a display name if - unspecified. - """ - - dataset = proto.Field( - proto.STRING, - number=1, - ) - dataset_display_name = proto.Field( - proto.STRING, - number=2, - ) - - class MigrateDataLabelingDatasetConfig(proto.Message): - r"""Config for migrating Dataset in datalabeling.googleapis.com - to Vertex AI's Dataset. - - Attributes: - dataset (str): - Required. Full resource name of data labeling Dataset. - Format: ``projects/{project}/datasets/{dataset}``. - dataset_display_name (str): - Optional. Display name of the Dataset in - Vertex AI. System will pick a display name if - unspecified. - migrate_data_labeling_annotated_dataset_configs (Sequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest.MigrateDataLabelingDatasetConfig.MigrateDataLabelingAnnotatedDatasetConfig]): - Optional. Configs for migrating - AnnotatedDataset in datalabeling.googleapis.com - to Vertex AI's SavedQuery. The specified - AnnotatedDatasets have to belong to the - datalabeling Dataset. - """ - - class MigrateDataLabelingAnnotatedDatasetConfig(proto.Message): - r"""Config for migrating AnnotatedDataset in - datalabeling.googleapis.com to Vertex AI's SavedQuery. - - Attributes: - annotated_dataset (str): - Required. Full resource name of data labeling - AnnotatedDataset. Format: - ``projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}``. 
- """ - - annotated_dataset = proto.Field( - proto.STRING, - number=1, - ) - - dataset = proto.Field( - proto.STRING, - number=1, - ) - dataset_display_name = proto.Field( - proto.STRING, - number=2, - ) - migrate_data_labeling_annotated_dataset_configs = proto.RepeatedField( - proto.MESSAGE, - number=3, - message='MigrateResourceRequest.MigrateDataLabelingDatasetConfig.MigrateDataLabelingAnnotatedDatasetConfig', - ) - - migrate_ml_engine_model_version_config = proto.Field( - proto.MESSAGE, - number=1, - oneof='request', - message=MigrateMlEngineModelVersionConfig, - ) - migrate_automl_model_config = proto.Field( - proto.MESSAGE, - number=2, - oneof='request', - message=MigrateAutomlModelConfig, - ) - migrate_automl_dataset_config = proto.Field( - proto.MESSAGE, - number=3, - oneof='request', - message=MigrateAutomlDatasetConfig, - ) - migrate_data_labeling_dataset_config = proto.Field( - proto.MESSAGE, - number=4, - oneof='request', - message=MigrateDataLabelingDatasetConfig, - ) - - -class BatchMigrateResourcesResponse(proto.Message): - r"""Response message for - [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. - - Attributes: - migrate_resource_responses (Sequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceResponse]): - Successfully migrated resources. - """ - - migrate_resource_responses = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='MigrateResourceResponse', - ) - - -class MigrateResourceResponse(proto.Message): - r"""Describes a successfully migrated resource. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - dataset (str): - Migrated Dataset's resource name. 
- - This field is a member of `oneof`_ ``migrated_resource``. - model (str): - Migrated Model's resource name. - - This field is a member of `oneof`_ ``migrated_resource``. - migratable_resource (google.cloud.aiplatform_v1beta1.types.MigratableResource): - Before migration, the identifier in - ml.googleapis.com, automl.googleapis.com or - datalabeling.googleapis.com. - """ - - dataset = proto.Field( - proto.STRING, - number=1, - oneof='migrated_resource', - ) - model = proto.Field( - proto.STRING, - number=2, - oneof='migrated_resource', - ) - migratable_resource = proto.Field( - proto.MESSAGE, - number=3, - message=gca_migratable_resource.MigratableResource, - ) - - -class BatchMigrateResourcesOperationMetadata(proto.Message): - r"""Runtime operation information for - [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The common part of the operation metadata. - partial_results (Sequence[google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesOperationMetadata.PartialResult]): - Partial results that reflect the latest - migration operation progress. - """ - - class PartialResult(proto.Message): - r"""Represents a partial result in batch migration operation for one - [MigrateResourceRequest][google.cloud.aiplatform.v1beta1.MigrateResourceRequest]. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - error (google.rpc.status_pb2.Status): - The error result of the migration request in - case of failure. - - This field is a member of `oneof`_ ``result``. - model (str): - Migrated model resource name. 
- - This field is a member of `oneof`_ ``result``. - dataset (str): - Migrated dataset resource name. - - This field is a member of `oneof`_ ``result``. - request (google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest): - It's the same as the value in - [MigrateResourceRequest.migrate_resource_requests][]. - """ - - error = proto.Field( - proto.MESSAGE, - number=2, - oneof='result', - message=status_pb2.Status, - ) - model = proto.Field( - proto.STRING, - number=3, - oneof='result', - ) - dataset = proto.Field( - proto.STRING, - number=4, - oneof='result', - ) - request = proto.Field( - proto.MESSAGE, - number=1, - message='MigrateResourceRequest', - ) - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - partial_results = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=PartialResult, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model.py deleted file mode 100644 index 679026b214..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model.py +++ /dev/null @@ -1,754 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import deployed_model_ref -from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1beta1.types import env_var -from google.cloud.aiplatform_v1beta1.types import explanation -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'Model', - 'PredictSchemata', - 'ModelContainerSpec', - 'Port', - }, -) - - -class Model(proto.Message): - r"""A trained machine learning Model. - - Attributes: - name (str): - The resource name of the Model. - display_name (str): - Required. The display name of the Model. - The name can be up to 128 characters long and - can be consist of any UTF-8 characters. - description (str): - The description of the Model. - predict_schemata (google.cloud.aiplatform_v1beta1.types.PredictSchemata): - The schemata that describe formats of the Model's - predictions and explanations as given and returned via - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] - and - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. - metadata_schema_uri (str): - Immutable. Points to a YAML file stored on Google Cloud - Storage describing additional information about the Model, - that is specific to it. Unset if the Model does not have any - additional information. The schema is defined as an OpenAPI - 3.0.2 `Schema - Object `__. - AutoML Models always have this field populated by Vertex AI, - if no additional metadata is needed, this field is set to an - empty string. Note: The URI given on output will be - immutable and probably different, including the URI scheme, - than the one given on input. The output URI will point to a - location where the user only has a read access. 
- metadata (google.protobuf.struct_pb2.Value): - Immutable. An additional information about the Model; the - schema of the metadata can be found in - [metadata_schema][google.cloud.aiplatform.v1beta1.Model.metadata_schema_uri]. - Unset if the Model does not have any additional information. - supported_export_formats (Sequence[google.cloud.aiplatform_v1beta1.types.Model.ExportFormat]): - Output only. The formats in which this Model - may be exported. If empty, this Model is not - available for export. - training_pipeline (str): - Output only. The resource name of the - TrainingPipeline that uploaded this Model, if - any. - container_spec (google.cloud.aiplatform_v1beta1.types.ModelContainerSpec): - Input only. The specification of the container that is to be - used when deploying this Model. The specification is - ingested upon - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel], - and all binaries it contains are copied and stored - internally by Vertex AI. Not present for AutoML Models. - artifact_uri (str): - Immutable. The path to the directory - containing the Model artifact and any of its - supporting files. Not present for AutoML Models. - supported_deployment_resources_types (Sequence[google.cloud.aiplatform_v1beta1.types.Model.DeploymentResourcesType]): - Output only. When this Model is deployed, its prediction - resources are described by the ``prediction_resources`` - field of the - [Endpoint.deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] - object. Because not all Models support all resource - configuration types, the configuration types this Model - supports are listed here. 
If no configuration types are - listed, the Model cannot be deployed to an - [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] and - does not support online predictions - ([PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] - or - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]). - Such a Model can serve predictions by using a - [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob], - if it has at least one entry each in - [supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats] - and - [supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats]. - supported_input_storage_formats (Sequence[str]): - Output only. The formats this Model supports in - [BatchPredictionJob.input_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. - If - [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] - exists, the instances should be given as per that schema. - - The possible formats are: - - - ``jsonl`` The JSON Lines format, where each instance is a - single line. Uses - [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. - - - ``csv`` The CSV format, where each instance is a single - comma-separated line. The first line in the file is the - header, containing comma-separated field names. Uses - [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. - - - ``tf-record`` The TFRecord format, where each instance is - a single record in tfrecord syntax. Uses - [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. - - - ``tf-record-gzip`` Similar to ``tf-record``, but the file - is gzipped. Uses - [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. 
- - - ``bigquery`` Each instance is a single row in BigQuery. - Uses - [BigQuerySource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.bigquery_source]. - - - ``file-list`` Each line of the file is the location of an - instance to process, uses ``gcs_source`` field of the - [InputConfig][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig] - object. - - If this Model doesn't support any of these formats it means - it cannot be used with a - [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. - However, if it has - [supported_deployment_resources_types][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types], - it could serve online predictions by using - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] - or - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. - supported_output_storage_formats (Sequence[str]): - Output only. The formats this Model supports in - [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config]. - If both - [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] - and - [PredictSchemata.prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri] - exist, the predictions are returned together with their - instances. In other words, the prediction has the original - instance data first, followed by the actual prediction - content (as per the schema). - - The possible formats are: - - - ``jsonl`` The JSON Lines format, where each prediction is - a single line. Uses - [GcsDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.gcs_destination]. - - - ``csv`` The CSV format, where each prediction is a single - comma-separated line. The first line in the file is the - header, containing comma-separated field names. 
Uses - [GcsDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.gcs_destination]. - - - ``bigquery`` Each prediction is a single row in a - BigQuery table, uses - [BigQueryDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.bigquery_destination] - . - - If this Model doesn't support any of these formats it means - it cannot be used with a - [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. - However, if it has - [supported_deployment_resources_types][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types], - it could serve online predictions by using - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] - or - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Model was - uploaded into Vertex AI. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Model was - most recently updated. - deployed_models (Sequence[google.cloud.aiplatform_v1beta1.types.DeployedModelRef]): - Output only. The pointers to DeployedModels - created from this Model. Note that Model could - have been deployed to Endpoints in different - Locations. - explanation_spec (google.cloud.aiplatform_v1beta1.types.ExplanationSpec): - The default explanation specification for this Model. - - The Model can be used for [requesting - explanation][PredictionService.Explain] after being - [deployed][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel] - if it is populated. The Model can be used for [batch - explanation][BatchPredictionJob.generate_explanation] if it - is populated. 
- - All fields of the explanation_spec can be overridden by - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] - of - [DeployModelRequest.deployed_model][google.cloud.aiplatform.v1beta1.DeployModelRequest.deployed_model], - or - [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] - of - [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. - - If the default explanation specification is not set for this - Model, this Model can still be used for [requesting - explanation][PredictionService.Explain] by setting - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] - of - [DeployModelRequest.deployed_model][google.cloud.aiplatform.v1beta1.DeployModelRequest.deployed_model] - and for [batch - explanation][BatchPredictionJob.generate_explanation] by - setting - [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] - of - [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. - etag (str): - Used to perform consistent read-modify-write - updates. If not set, a blind "overwrite" update - happens. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Model.LabelsEntry]): - The labels with user-defined metadata to - organize your Models. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): - Customer-managed encryption key spec for a - Model. If set, this Model and all sub-resources - of this Model will be secured by this key. 
- """ - class DeploymentResourcesType(proto.Enum): - r"""Identifies a type of Model's prediction resources.""" - DEPLOYMENT_RESOURCES_TYPE_UNSPECIFIED = 0 - DEDICATED_RESOURCES = 1 - AUTOMATIC_RESOURCES = 2 - - class ExportFormat(proto.Message): - r"""Represents export format supported by the Model. - All formats export to Google Cloud Storage. - - Attributes: - id (str): - Output only. The ID of the export format. The possible - format IDs are: - - - ``tflite`` Used for Android mobile devices. - - - ``edgetpu-tflite`` Used for `Edge - TPU `__ devices. - - - ``tf-saved-model`` A tensorflow model in SavedModel - format. - - - ``tf-js`` A - `TensorFlow.js `__ model - that can be used in the browser and in Node.js using - JavaScript. - - - ``core-ml`` Used for iOS mobile devices. - - - ``custom-trained`` A Model that was uploaded or trained - by custom code. - exportable_contents (Sequence[google.cloud.aiplatform_v1beta1.types.Model.ExportFormat.ExportableContent]): - Output only. The content of this Model that - may be exported. 
- """ - class ExportableContent(proto.Enum): - r"""The Model content that can be exported.""" - EXPORTABLE_CONTENT_UNSPECIFIED = 0 - ARTIFACT = 1 - IMAGE = 2 - - id = proto.Field( - proto.STRING, - number=1, - ) - exportable_contents = proto.RepeatedField( - proto.ENUM, - number=2, - enum='Model.ExportFormat.ExportableContent', - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=3, - ) - predict_schemata = proto.Field( - proto.MESSAGE, - number=4, - message='PredictSchemata', - ) - metadata_schema_uri = proto.Field( - proto.STRING, - number=5, - ) - metadata = proto.Field( - proto.MESSAGE, - number=6, - message=struct_pb2.Value, - ) - supported_export_formats = proto.RepeatedField( - proto.MESSAGE, - number=20, - message=ExportFormat, - ) - training_pipeline = proto.Field( - proto.STRING, - number=7, - ) - container_spec = proto.Field( - proto.MESSAGE, - number=9, - message='ModelContainerSpec', - ) - artifact_uri = proto.Field( - proto.STRING, - number=26, - ) - supported_deployment_resources_types = proto.RepeatedField( - proto.ENUM, - number=10, - enum=DeploymentResourcesType, - ) - supported_input_storage_formats = proto.RepeatedField( - proto.STRING, - number=11, - ) - supported_output_storage_formats = proto.RepeatedField( - proto.STRING, - number=12, - ) - create_time = proto.Field( - proto.MESSAGE, - number=13, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=14, - message=timestamp_pb2.Timestamp, - ) - deployed_models = proto.RepeatedField( - proto.MESSAGE, - number=15, - message=deployed_model_ref.DeployedModelRef, - ) - explanation_spec = proto.Field( - proto.MESSAGE, - number=23, - message=explanation.ExplanationSpec, - ) - etag = proto.Field( - proto.STRING, - number=16, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=17, - ) - encryption_spec = proto.Field( - 
proto.MESSAGE, - number=24, - message=gca_encryption_spec.EncryptionSpec, - ) - - -class PredictSchemata(proto.Message): - r"""Contains the schemata used in Model's predictions and explanations - via - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict], - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain] - and - [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. - - Attributes: - instance_schema_uri (str): - Immutable. Points to a YAML file stored on Google Cloud - Storage describing the format of a single instance, which - are used in - [PredictRequest.instances][google.cloud.aiplatform.v1beta1.PredictRequest.instances], - [ExplainRequest.instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] - and - [BatchPredictionJob.input_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. - The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. - AutoML Models always have this field populated by Vertex AI. - Note: The URI given on output will be immutable and probably - different, including the URI scheme, than the one given on - input. The output URI will point to a location where the - user only has a read access. - parameters_schema_uri (str): - Immutable. Points to a YAML file stored on Google Cloud - Storage describing the parameters of prediction and - explanation via - [PredictRequest.parameters][google.cloud.aiplatform.v1beta1.PredictRequest.parameters], - [ExplainRequest.parameters][google.cloud.aiplatform.v1beta1.ExplainRequest.parameters] - and - [BatchPredictionJob.model_parameters][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model_parameters]. - The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. - AutoML Models always have this field populated by Vertex AI, - if no parameters are supported, then it is set to an empty - string. 
Note: The URI given on output will be immutable and - probably different, including the URI scheme, than the one - given on input. The output URI will point to a location - where the user only has a read access. - prediction_schema_uri (str): - Immutable. Points to a YAML file stored on Google Cloud - Storage describing the format of a single prediction - produced by this Model, which are returned via - [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions], - [ExplainResponse.explanations][google.cloud.aiplatform.v1beta1.ExplainResponse.explanations], - and - [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config]. - The schema is defined as an OpenAPI 3.0.2 `Schema - Object `__. - AutoML Models always have this field populated by Vertex AI. - Note: The URI given on output will be immutable and probably - different, including the URI scheme, than the one given on - input. The output URI will point to a location where the - user only has a read access. - """ - - instance_schema_uri = proto.Field( - proto.STRING, - number=1, - ) - parameters_schema_uri = proto.Field( - proto.STRING, - number=2, - ) - prediction_schema_uri = proto.Field( - proto.STRING, - number=3, - ) - - -class ModelContainerSpec(proto.Message): - r"""Specification of a container for serving predictions. Some fields in - this message correspond to fields in the `Kubernetes Container v1 - core - specification `__. - - Attributes: - image_uri (str): - Required. Immutable. URI of the Docker image to be used as - the custom container for serving predictions. This URI must - identify an image in Artifact Registry or Container - Registry. Learn more about the `container publishing - requirements `__, - including permissions requirements for the Vertex AI Service - Agent. 
- - The container image is ingested upon - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel], - stored internally, and this original path is afterwards not - used. - - To learn about the requirements for the Docker image itself, - see `Custom container - requirements `__. - - You can use the URI to one of Vertex AI's `pre-built - container images for - prediction `__ - in this field. - command (Sequence[str]): - Immutable. Specifies the command that runs when the - container starts. This overrides the container's - `ENTRYPOINT `__. - Specify this field as an array of executable and arguments, - similar to a Docker ``ENTRYPOINT``'s "exec" form, not its - "shell" form. - - If you do not specify this field, then the container's - ``ENTRYPOINT`` runs, in conjunction with the - [args][google.cloud.aiplatform.v1beta1.ModelContainerSpec.args] - field or the container's - ```CMD`` `__, - if either exists. If this field is not specified and the - container does not have an ``ENTRYPOINT``, then refer to the - Docker documentation about `how ``CMD`` and ``ENTRYPOINT`` - interact `__. - - If you specify this field, then you can also specify the - ``args`` field to provide additional arguments for this - command. However, if you specify this field, then the - container's ``CMD`` is ignored. See the `Kubernetes - documentation about how the ``command`` and ``args`` fields - interact with a container's ``ENTRYPOINT`` and - ``CMD`` `__. - - In this field, you can reference `environment variables set - by Vertex - AI `__ - and environment variables set in the - [env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env] - field. You cannot reference environment variables set in the - Docker image. In order for environment variables to be - expanded, reference them by using the following syntax: - $(VARIABLE_NAME) Note that this differs from Bash variable - expansion, which does not use parentheses. 
If a variable - cannot be resolved, the reference in the input string is - used unchanged. To avoid variable expansion, you can escape - this syntax with ``$$``; for example: $$(VARIABLE_NAME) This - field corresponds to the ``command`` field of the Kubernetes - Containers `v1 core - API `__. - args (Sequence[str]): - Immutable. Specifies arguments for the command that runs - when the container starts. This overrides the container's - ```CMD`` `__. - Specify this field as an array of executable and arguments, - similar to a Docker ``CMD``'s "default parameters" form. - - If you don't specify this field but do specify the - [command][google.cloud.aiplatform.v1beta1.ModelContainerSpec.command] - field, then the command from the ``command`` field runs - without any additional arguments. See the `Kubernetes - documentation about how the ``command`` and ``args`` fields - interact with a container's ``ENTRYPOINT`` and - ``CMD`` `__. - - If you don't specify this field and don't specify the - ``command`` field, then the container's - ```ENTRYPOINT`` `__ - and ``CMD`` determine what runs based on their default - behavior. See the Docker documentation about `how ``CMD`` - and ``ENTRYPOINT`` - interact `__. - - In this field, you can reference `environment variables set - by Vertex - AI `__ - and environment variables set in the - [env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env] - field. You cannot reference environment variables set in the - Docker image. In order for environment variables to be - expanded, reference them by using the following syntax: - $(VARIABLE_NAME) Note that this differs from Bash variable - expansion, which does not use parentheses. If a variable - cannot be resolved, the reference in the input string is - used unchanged. To avoid variable expansion, you can escape - this syntax with ``$$``; for example: $$(VARIABLE_NAME) This - field corresponds to the ``args`` field of the Kubernetes - Containers `v1 core - API `__. 
- env (Sequence[google.cloud.aiplatform_v1beta1.types.EnvVar]): - Immutable. List of environment variables to set in the - container. After the container starts running, code running - in the container can read these environment variables. - - Additionally, the - [command][google.cloud.aiplatform.v1beta1.ModelContainerSpec.command] - and - [args][google.cloud.aiplatform.v1beta1.ModelContainerSpec.args] - fields can reference these variables. Later entries in this - list can also reference earlier entries. For example, the - following example sets the variable ``VAR_2`` to have the - value ``foo bar``: - - .. code:: json - - [ - { - "name": "VAR_1", - "value": "foo" - }, - { - "name": "VAR_2", - "value": "$(VAR_1) bar" - } - ] - - If you switch the order of the variables in the example, - then the expansion does not occur. - - This field corresponds to the ``env`` field of the - Kubernetes Containers `v1 core - API `__. - ports (Sequence[google.cloud.aiplatform_v1beta1.types.Port]): - Immutable. List of ports to expose from the container. - Vertex AI sends any prediction requests that it receives to - the first port on this list. Vertex AI also sends `liveness - and health - checks `__ - to this port. - - If you do not specify this field, it defaults to following - value: - - .. code:: json - - [ - { - "containerPort": 8080 - } - ] - - Vertex AI does not use ports other than the first one - listed. This field corresponds to the ``ports`` field of the - Kubernetes Containers `v1 core - API `__. - predict_route (str): - Immutable. HTTP path on the container to send prediction - requests to. Vertex AI forwards requests sent using - [projects.locations.endpoints.predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] - to this path on the container's IP address and port. Vertex - AI then returns the container's response in the API - response. 
- - For example, if you set this field to ``/foo``, then when - Vertex AI receives a prediction request, it forwards the - request body in a POST request to the ``/foo`` path on the - port of your container specified by the first value of this - ``ModelContainerSpec``'s - [ports][google.cloud.aiplatform.v1beta1.ModelContainerSpec.ports] - field. - - If you don't specify this field, it defaults to the - following value when you [deploy this Model to an - Endpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]: - /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict - The placeholders in this value are replaced as follows: - - - ENDPOINT: The last segment (following ``endpoints/``)of - the Endpoint.name][] field of the Endpoint where this - Model has been deployed. (Vertex AI makes this value - available to your container code as the - ```AIP_ENDPOINT_ID`` environment - variable `__.) - - - DEPLOYED_MODEL: - [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] - of the ``DeployedModel``. (Vertex AI makes this value - available to your container code as the - ```AIP_DEPLOYED_MODEL_ID`` environment - variable `__.) - health_route (str): - Immutable. HTTP path on the container to send health checks - to. Vertex AI intermittently sends GET requests to this path - on the container's IP address and port to check that the - container is healthy. Read more about `health - checks `__. - - For example, if you set this field to ``/bar``, then Vertex - AI intermittently sends a GET request to the ``/bar`` path - on the port of your container specified by the first value - of this ``ModelContainerSpec``'s - [ports][google.cloud.aiplatform.v1beta1.ModelContainerSpec.ports] - field. 
- - If you don't specify this field, it defaults to the - following value when you [deploy this Model to an - Endpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]: - /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict - The placeholders in this value are replaced as follows: - - - ENDPOINT: The last segment (following ``endpoints/``)of - the Endpoint.name][] field of the Endpoint where this - Model has been deployed. (Vertex AI makes this value - available to your container code as the - ```AIP_ENDPOINT_ID`` environment - variable `__.) - - - DEPLOYED_MODEL: - [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] - of the ``DeployedModel``. (Vertex AI makes this value - available to your container code as the - ```AIP_DEPLOYED_MODEL_ID`` environment - variable `__.) - """ - - image_uri = proto.Field( - proto.STRING, - number=1, - ) - command = proto.RepeatedField( - proto.STRING, - number=2, - ) - args = proto.RepeatedField( - proto.STRING, - number=3, - ) - env = proto.RepeatedField( - proto.MESSAGE, - number=4, - message=env_var.EnvVar, - ) - ports = proto.RepeatedField( - proto.MESSAGE, - number=5, - message='Port', - ) - predict_route = proto.Field( - proto.STRING, - number=6, - ) - health_route = proto.Field( - proto.STRING, - number=7, - ) - - -class Port(proto.Message): - r"""Represents a network port in a container. - - Attributes: - container_port (int): - The number of the port to expose on the pod's - IP address. Must be a valid port number, between - 1 and 65535 inclusive. 
- """ - - container_port = proto.Field( - proto.INT32, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py deleted file mode 100644 index 9b3cbd7893..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_deployment_monitoring_job.py +++ /dev/null @@ -1,441 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats -from google.cloud.aiplatform_v1beta1.types import io -from google.cloud.aiplatform_v1beta1.types import job_state -from google.cloud.aiplatform_v1beta1.types import model_monitoring -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'ModelDeploymentMonitoringObjectiveType', - 'ModelDeploymentMonitoringJob', - 'ModelDeploymentMonitoringBigQueryTable', - 'ModelDeploymentMonitoringObjectiveConfig', - 'ModelDeploymentMonitoringScheduleConfig', - 'ModelMonitoringStatsAnomalies', - }, -) - - -class ModelDeploymentMonitoringObjectiveType(proto.Enum): - r"""The Model Monitoring Objective types.""" - MODEL_DEPLOYMENT_MONITORING_OBJECTIVE_TYPE_UNSPECIFIED = 0 - RAW_FEATURE_SKEW = 1 - RAW_FEATURE_DRIFT = 2 - FEATURE_ATTRIBUTION_SKEW = 3 - FEATURE_ATTRIBUTION_DRIFT = 4 - - -class ModelDeploymentMonitoringJob(proto.Message): - r"""Represents a job that runs periodically to monitor the - deployed models in an endpoint. It will analyze the logged - training & prediction data to detect any abnormal behaviors. - - Attributes: - name (str): - Output only. Resource name of a - ModelDeploymentMonitoringJob. - display_name (str): - Required. The user-defined name of the - ModelDeploymentMonitoringJob. The name can be up - to 128 characters long and can be consist of any - UTF-8 characters. - Display name of a ModelDeploymentMonitoringJob. - endpoint (str): - Required. Endpoint resource name. 
Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - state (google.cloud.aiplatform_v1beta1.types.JobState): - Output only. The detailed state of the - monitoring job. When the job is still creating, - the state will be 'PENDING'. Once the job is - successfully created, the state will be - 'RUNNING'. Pause the job, the state will be - 'PAUSED'. - Resume the job, the state will return to - 'RUNNING'. - schedule_state (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob.MonitoringScheduleState): - Output only. Schedule state when the - monitoring job is in Running state. - model_deployment_monitoring_objective_configs (Sequence[google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringObjectiveConfig]): - Required. The config for monitoring - objectives. This is a per DeployedModel config. - Each DeployedModel needs to be configured - separately. - model_deployment_monitoring_schedule_config (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringScheduleConfig): - Required. Schedule config for running the - monitoring job. - logging_sampling_strategy (google.cloud.aiplatform_v1beta1.types.SamplingStrategy): - Required. Sample Strategy for logging. - model_monitoring_alert_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringAlertConfig): - Alert config for model monitoring. - predict_instance_schema_uri (str): - YAML schema file uri describing the format of - a single instance, which are given to format - this Endpoint's prediction (and explanation). If - not set, we will generate predict schema from - collected predict requests. - sample_predict_instance (google.protobuf.struct_pb2.Value): - Sample Predict instance, same format as - [PredictRequest.instances][google.cloud.aiplatform.v1beta1.PredictRequest.instances], - this can be set as a replacement of - [ModelDeploymentMonitoringJob.predict_instance_schema_uri][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.predict_instance_schema_uri]. 
- If not set, we will generate predict schema from collected - predict requests. - analysis_instance_schema_uri (str): - YAML schema file uri describing the format of a single - instance that you want Tensorflow Data Validation (TFDV) to - analyze. - - If this field is empty, all the feature data types are - inferred from - [predict_instance_schema_uri][google.cloud.aiplatform.v1beta1.ModelDeploymentMonitoringJob.predict_instance_schema_uri], - meaning that TFDV will use the data in the exact format(data - type) as prediction request/response. If there are any data - type differences between predict instance and TFDV instance, - this field can be used to override the schema. For models - trained with Vertex AI, this field must be set as all the - fields in predict instance formatted as string. - bigquery_tables (Sequence[google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringBigQueryTable]): - Output only. The created bigquery tables for - the job under customer project. Customer could - do their own query & analysis. There could be 4 - log tables in maximum: - 1. Training data logging predict - request/response 2. Serving data logging predict - request/response - log_ttl (google.protobuf.duration_pb2.Duration): - The TTL of BigQuery tables in user projects - which stores logs. A day is the basic unit of - the TTL and we take the ceil of TTL/86400(a - day). e.g. { second: 3600} indicates ttl = 1 - day. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob.LabelsEntry]): - The labels with user-defined metadata to - organize your ModelDeploymentMonitoringJob. - - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. 
Timestamp when this - ModelDeploymentMonitoringJob was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - ModelDeploymentMonitoringJob was updated most - recently. - next_schedule_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this monitoring - pipeline will be scheduled to run for the next - round. - stats_anomalies_base_directory (google.cloud.aiplatform_v1beta1.types.GcsDestination): - Stats anomalies base folder path. - encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): - Customer-managed encryption key spec for a - ModelDeploymentMonitoringJob. If set, this - ModelDeploymentMonitoringJob and all sub- - resources of this ModelDeploymentMonitoringJob - will be secured by this key. - enable_monitoring_pipeline_logs (bool): - If true, the scheduled monitoring pipeline logs are sent to - Google Cloud Logging, including pipeline status and - anomalies detected. Please note the logs incur cost, which - are subject to `Cloud Logging - pricing `__. - error (google.rpc.status_pb2.Status): - Output only. Only populated when the job's state is - ``JOB_STATE_FAILED`` or ``JOB_STATE_CANCELLED``. 
- """ - class MonitoringScheduleState(proto.Enum): - r"""The state to Specify the monitoring pipeline.""" - MONITORING_SCHEDULE_STATE_UNSPECIFIED = 0 - PENDING = 1 - OFFLINE = 2 - RUNNING = 3 - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - endpoint = proto.Field( - proto.STRING, - number=3, - ) - state = proto.Field( - proto.ENUM, - number=4, - enum=job_state.JobState, - ) - schedule_state = proto.Field( - proto.ENUM, - number=5, - enum=MonitoringScheduleState, - ) - model_deployment_monitoring_objective_configs = proto.RepeatedField( - proto.MESSAGE, - number=6, - message='ModelDeploymentMonitoringObjectiveConfig', - ) - model_deployment_monitoring_schedule_config = proto.Field( - proto.MESSAGE, - number=7, - message='ModelDeploymentMonitoringScheduleConfig', - ) - logging_sampling_strategy = proto.Field( - proto.MESSAGE, - number=8, - message=model_monitoring.SamplingStrategy, - ) - model_monitoring_alert_config = proto.Field( - proto.MESSAGE, - number=15, - message=model_monitoring.ModelMonitoringAlertConfig, - ) - predict_instance_schema_uri = proto.Field( - proto.STRING, - number=9, - ) - sample_predict_instance = proto.Field( - proto.MESSAGE, - number=19, - message=struct_pb2.Value, - ) - analysis_instance_schema_uri = proto.Field( - proto.STRING, - number=16, - ) - bigquery_tables = proto.RepeatedField( - proto.MESSAGE, - number=10, - message='ModelDeploymentMonitoringBigQueryTable', - ) - log_ttl = proto.Field( - proto.MESSAGE, - number=17, - message=duration_pb2.Duration, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=11, - ) - create_time = proto.Field( - proto.MESSAGE, - number=12, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=13, - message=timestamp_pb2.Timestamp, - ) - next_schedule_time = proto.Field( - proto.MESSAGE, - number=14, - message=timestamp_pb2.Timestamp, - ) - stats_anomalies_base_directory = 
proto.Field( - proto.MESSAGE, - number=20, - message=io.GcsDestination, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=21, - message=gca_encryption_spec.EncryptionSpec, - ) - enable_monitoring_pipeline_logs = proto.Field( - proto.BOOL, - number=22, - ) - error = proto.Field( - proto.MESSAGE, - number=23, - message=status_pb2.Status, - ) - - -class ModelDeploymentMonitoringBigQueryTable(proto.Message): - r"""ModelDeploymentMonitoringBigQueryTable specifies the BigQuery - table name as well as some information of the logs stored in - this table. - - Attributes: - log_source (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringBigQueryTable.LogSource): - The source of log. - log_type (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringBigQueryTable.LogType): - The type of log. - bigquery_table_path (str): - The created BigQuery table to store logs. Customer could do - their own query & analysis. Format: - ``bq://.model_deployment_monitoring_._`` - """ - class LogSource(proto.Enum): - r"""Indicates where does the log come from.""" - LOG_SOURCE_UNSPECIFIED = 0 - TRAINING = 1 - SERVING = 2 - - class LogType(proto.Enum): - r"""Indicates what type of traffic does the log belong to.""" - LOG_TYPE_UNSPECIFIED = 0 - PREDICT = 1 - EXPLAIN = 2 - - log_source = proto.Field( - proto.ENUM, - number=1, - enum=LogSource, - ) - log_type = proto.Field( - proto.ENUM, - number=2, - enum=LogType, - ) - bigquery_table_path = proto.Field( - proto.STRING, - number=3, - ) - - -class ModelDeploymentMonitoringObjectiveConfig(proto.Message): - r"""ModelDeploymentMonitoringObjectiveConfig contains the pair of - deployed_model_id to ModelMonitoringObjectiveConfig. - - Attributes: - deployed_model_id (str): - The DeployedModel ID of the objective config. - objective_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig): - The objective config of for the - modelmonitoring job of this deployed model. 
- """ - - deployed_model_id = proto.Field( - proto.STRING, - number=1, - ) - objective_config = proto.Field( - proto.MESSAGE, - number=2, - message=model_monitoring.ModelMonitoringObjectiveConfig, - ) - - -class ModelDeploymentMonitoringScheduleConfig(proto.Message): - r"""The config for scheduling monitoring job. - - Attributes: - monitor_interval (google.protobuf.duration_pb2.Duration): - Required. The model monitoring job running - interval. It will be rounded up to next full - hour. - """ - - monitor_interval = proto.Field( - proto.MESSAGE, - number=1, - message=duration_pb2.Duration, - ) - - -class ModelMonitoringStatsAnomalies(proto.Message): - r"""Statistics and anomalies generated by Model Monitoring. - - Attributes: - objective (google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringObjectiveType): - Model Monitoring Objective those stats and - anomalies belonging to. - deployed_model_id (str): - Deployed Model ID. - anomaly_count (int): - Number of anomalies within all stats. - feature_stats (Sequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies]): - A list of historical Stats and Anomalies - generated for all Features. - """ - - class FeatureHistoricStatsAnomalies(proto.Message): - r"""Historical Stats (and Anomalies) for a specific Feature. - - Attributes: - feature_display_name (str): - Display Name of the Feature. - threshold (google.cloud.aiplatform_v1beta1.types.ThresholdConfig): - Threshold for anomaly detection. - training_stats (google.cloud.aiplatform_v1beta1.types.FeatureStatsAnomaly): - Stats calculated for the Training Dataset. - prediction_stats (Sequence[google.cloud.aiplatform_v1beta1.types.FeatureStatsAnomaly]): - A list of historical stats generated by - different time window's Prediction Dataset. 
- """ - - feature_display_name = proto.Field( - proto.STRING, - number=1, - ) - threshold = proto.Field( - proto.MESSAGE, - number=3, - message=model_monitoring.ThresholdConfig, - ) - training_stats = proto.Field( - proto.MESSAGE, - number=4, - message=feature_monitoring_stats.FeatureStatsAnomaly, - ) - prediction_stats = proto.RepeatedField( - proto.MESSAGE, - number=5, - message=feature_monitoring_stats.FeatureStatsAnomaly, - ) - - objective = proto.Field( - proto.ENUM, - number=1, - enum='ModelDeploymentMonitoringObjectiveType', - ) - deployed_model_id = proto.Field( - proto.STRING, - number=2, - ) - anomaly_count = proto.Field( - proto.INT32, - number=3, - ) - feature_stats = proto.RepeatedField( - proto.MESSAGE, - number=4, - message=FeatureHistoricStatsAnomalies, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation.py deleted file mode 100644 index 9b63061df3..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation.py +++ /dev/null @@ -1,134 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import explanation -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'ModelEvaluation', - }, -) - - -class ModelEvaluation(proto.Message): - r"""A collection of metrics calculated by comparing Model's - predictions on all of the test data against annotations from the - test data. - - Attributes: - name (str): - Output only. The resource name of the - ModelEvaluation. - metrics_schema_uri (str): - Output only. Points to a YAML file stored on Google Cloud - Storage describing the - [metrics][google.cloud.aiplatform.v1beta1.ModelEvaluation.metrics] - of this ModelEvaluation. The schema is defined as an OpenAPI - 3.0.2 `Schema - Object `__. - metrics (google.protobuf.struct_pb2.Value): - Output only. Evaluation metrics of the Model. The schema of - the metrics is stored in - [metrics_schema_uri][google.cloud.aiplatform.v1beta1.ModelEvaluation.metrics_schema_uri] - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - ModelEvaluation was created. - slice_dimensions (Sequence[str]): - Output only. All possible - [dimensions][ModelEvaluationSlice.slice.dimension] of - ModelEvaluationSlices. The dimensions can be used as the - filter of the - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices] - request, in the form of ``slice.dimension = ``. - model_explanation (google.cloud.aiplatform_v1beta1.types.ModelExplanation): - Output only. Aggregated explanation metrics - for the Model's prediction output over the data - this ModelEvaluation uses. This field is - populated only if the Model is evaluated with - explanations, and only for AutoML tabular - Models. 
- explanation_specs (Sequence[google.cloud.aiplatform_v1beta1.types.ModelEvaluation.ModelEvaluationExplanationSpec]): - Output only. Describes the values of - [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec] - that are used for explaining the predicted values on the - evaluated data. - """ - - class ModelEvaluationExplanationSpec(proto.Message): - r""" - - Attributes: - explanation_type (str): - Explanation type. - - For AutoML Image Classification models, possible values are: - - - ``image-integrated-gradients`` - - ``image-xrai`` - explanation_spec (google.cloud.aiplatform_v1beta1.types.ExplanationSpec): - Explanation spec details. - """ - - explanation_type = proto.Field( - proto.STRING, - number=1, - ) - explanation_spec = proto.Field( - proto.MESSAGE, - number=2, - message=explanation.ExplanationSpec, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - metrics_schema_uri = proto.Field( - proto.STRING, - number=2, - ) - metrics = proto.Field( - proto.MESSAGE, - number=3, - message=struct_pb2.Value, - ) - create_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - slice_dimensions = proto.RepeatedField( - proto.STRING, - number=5, - ) - model_explanation = proto.Field( - proto.MESSAGE, - number=8, - message=explanation.ModelExplanation, - ) - explanation_specs = proto.RepeatedField( - proto.MESSAGE, - number=9, - message=ModelEvaluationExplanationSpec, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py deleted file mode 100644 index 94059f67b6..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_evaluation_slice.py +++ /dev/null @@ -1,110 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# 
you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'ModelEvaluationSlice', - }, -) - - -class ModelEvaluationSlice(proto.Message): - r"""A collection of metrics calculated by comparing Model's - predictions on a slice of the test data against ground truth - annotations. - - Attributes: - name (str): - Output only. The resource name of the - ModelEvaluationSlice. - slice_ (google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice.Slice): - Output only. The slice of the test data that - is used to evaluate the Model. - metrics_schema_uri (str): - Output only. Points to a YAML file stored on Google Cloud - Storage describing the - [metrics][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.metrics] - of this ModelEvaluationSlice. The schema is defined as an - OpenAPI 3.0.2 `Schema - Object `__. - metrics (google.protobuf.struct_pb2.Value): - Output only. Sliced evaluation metrics of the Model. The - schema of the metrics is stored in - [metrics_schema_uri][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.metrics_schema_uri] - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - ModelEvaluationSlice was created. - """ - - class Slice(proto.Message): - r"""Definition of a slice. - - Attributes: - dimension (str): - Output only. The dimension of the slice. 
Well-known - dimensions are: - - - ``annotationSpec``: This slice is on the test data that - has either ground truth or prediction with - [AnnotationSpec.display_name][google.cloud.aiplatform.v1beta1.AnnotationSpec.display_name] - equals to - [value][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.Slice.value]. - value (str): - Output only. The value of the dimension in - this slice. - """ - - dimension = proto.Field( - proto.STRING, - number=1, - ) - value = proto.Field( - proto.STRING, - number=2, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - slice_ = proto.Field( - proto.MESSAGE, - number=2, - message=Slice, - ) - metrics_schema_uri = proto.Field( - proto.STRING, - number=3, - ) - metrics = proto.Field( - proto.MESSAGE, - number=4, - message=struct_pb2.Value, - ) - create_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_monitoring.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_monitoring.py deleted file mode 100644 index e268558834..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_monitoring.py +++ /dev/null @@ -1,398 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import io - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'ModelMonitoringObjectiveConfig', - 'ModelMonitoringAlertConfig', - 'ThresholdConfig', - 'SamplingStrategy', - }, -) - - -class ModelMonitoringObjectiveConfig(proto.Message): - r"""Next ID: 6 - - Attributes: - training_dataset (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.TrainingDataset): - Training dataset for models. This field has - to be set only if - TrainingPredictionSkewDetectionConfig is - specified. - training_prediction_skew_detection_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig): - The config for skew between training data and - prediction data. - prediction_drift_detection_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig): - The config for drift of prediction data. - explanation_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.ExplanationConfig): - The config for integrating with Vertex - Explainable AI. - """ - - class TrainingDataset(proto.Message): - r"""Training Dataset information. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - dataset (str): - The resource name of the Dataset used to - train this Model. - - This field is a member of `oneof`_ ``data_source``. - gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): - The Google Cloud Storage uri of the unmanaged - Dataset used to train this Model. - - This field is a member of `oneof`_ ``data_source``. 
- bigquery_source (google.cloud.aiplatform_v1beta1.types.BigQuerySource): - The BigQuery table of the unmanaged Dataset - used to train this Model. - - This field is a member of `oneof`_ ``data_source``. - data_format (str): - Data format of the dataset, only applicable - if the input is from Google Cloud Storage. - The possible formats are: - - "tf-record" - The source file is a TFRecord file. - - "csv" - The source file is a CSV file. - target_field (str): - The target field name the model is to - predict. This field will be excluded when doing - Predict and (or) Explain for the training data. - logging_sampling_strategy (google.cloud.aiplatform_v1beta1.types.SamplingStrategy): - Strategy to sample data from Training - Dataset. If not set, we process the whole - dataset. - """ - - dataset = proto.Field( - proto.STRING, - number=3, - oneof='data_source', - ) - gcs_source = proto.Field( - proto.MESSAGE, - number=4, - oneof='data_source', - message=io.GcsSource, - ) - bigquery_source = proto.Field( - proto.MESSAGE, - number=5, - oneof='data_source', - message=io.BigQuerySource, - ) - data_format = proto.Field( - proto.STRING, - number=2, - ) - target_field = proto.Field( - proto.STRING, - number=6, - ) - logging_sampling_strategy = proto.Field( - proto.MESSAGE, - number=7, - message='SamplingStrategy', - ) - - class TrainingPredictionSkewDetectionConfig(proto.Message): - r"""The config for Training & Prediction data skew detection. It - specifies the training dataset sources and the skew detection - parameters. - - Attributes: - skew_thresholds (Sequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig.SkewThresholdsEntry]): - Key is the feature name and value is the - threshold. If a feature needs to be monitored - for skew, a value threshold must be configured - for that feature. The threshold here is against - feature distribution distance between the - training and prediction feature. 
- attribution_score_skew_thresholds (Sequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig.AttributionScoreSkewThresholdsEntry]): - Key is the feature name and value is the - threshold. The threshold here is against - attribution score distance between the training - and prediction feature. - """ - - skew_thresholds = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=1, - message='ThresholdConfig', - ) - attribution_score_skew_thresholds = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=2, - message='ThresholdConfig', - ) - - class PredictionDriftDetectionConfig(proto.Message): - r"""The config for Prediction data drift detection. - - Attributes: - drift_thresholds (Sequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig.DriftThresholdsEntry]): - Key is the feature name and value is the - threshold. If a feature needs to be monitored - for drift, a value threshold must be configured - for that feature. The threshold here is against - feature distribution distance between different - time windws. - attribution_score_drift_thresholds (Sequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig.AttributionScoreDriftThresholdsEntry]): - Key is the feature name and value is the - threshold. The threshold here is against - attribution score distance between different - time windows. - """ - - drift_thresholds = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=1, - message='ThresholdConfig', - ) - attribution_score_drift_thresholds = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=2, - message='ThresholdConfig', - ) - - class ExplanationConfig(proto.Message): - r"""The config for integrating with Vertex Explainable AI. Only - applicable if the Model has explanation_spec populated. 
- - Attributes: - enable_feature_attributes (bool): - If want to analyze the Vertex Explainable AI - feature attribute scores or not. If set to true, - Vertex AI will log the feature attributions from - explain response and do the skew/drift detection - for them. - explanation_baseline (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.ExplanationConfig.ExplanationBaseline): - Predictions generated by the - BatchPredictionJob using baseline dataset. - """ - - class ExplanationBaseline(proto.Message): - r"""Output from - [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob] - for Model Monitoring baseline dataset, which can be used to generate - baseline attribution scores. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - gcs (google.cloud.aiplatform_v1beta1.types.GcsDestination): - Cloud Storage location for BatchExplain - output. - - This field is a member of `oneof`_ ``destination``. - bigquery (google.cloud.aiplatform_v1beta1.types.BigQueryDestination): - BigQuery location for BatchExplain output. - - This field is a member of `oneof`_ ``destination``. - prediction_format (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.ExplanationConfig.ExplanationBaseline.PredictionFormat): - The storage format of the predictions - generated BatchPrediction job. - """ - class PredictionFormat(proto.Enum): - r"""The storage format of the predictions generated - BatchPrediction job. 
- """ - PREDICTION_FORMAT_UNSPECIFIED = 0 - JSONL = 2 - BIGQUERY = 3 - - gcs = proto.Field( - proto.MESSAGE, - number=2, - oneof='destination', - message=io.GcsDestination, - ) - bigquery = proto.Field( - proto.MESSAGE, - number=3, - oneof='destination', - message=io.BigQueryDestination, - ) - prediction_format = proto.Field( - proto.ENUM, - number=1, - enum='ModelMonitoringObjectiveConfig.ExplanationConfig.ExplanationBaseline.PredictionFormat', - ) - - enable_feature_attributes = proto.Field( - proto.BOOL, - number=1, - ) - explanation_baseline = proto.Field( - proto.MESSAGE, - number=2, - message='ModelMonitoringObjectiveConfig.ExplanationConfig.ExplanationBaseline', - ) - - training_dataset = proto.Field( - proto.MESSAGE, - number=1, - message=TrainingDataset, - ) - training_prediction_skew_detection_config = proto.Field( - proto.MESSAGE, - number=2, - message=TrainingPredictionSkewDetectionConfig, - ) - prediction_drift_detection_config = proto.Field( - proto.MESSAGE, - number=3, - message=PredictionDriftDetectionConfig, - ) - explanation_config = proto.Field( - proto.MESSAGE, - number=5, - message=ExplanationConfig, - ) - - -class ModelMonitoringAlertConfig(proto.Message): - r"""Next ID: 3 - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - email_alert_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringAlertConfig.EmailAlertConfig): - Email alert config. - - This field is a member of `oneof`_ ``alert``. - enable_logging (bool): - Dump the anomalies to Cloud Logging. The anomalies will be - put to json payload encoded from proto - [google.cloud.aiplatform.logging.ModelMonitoringAnomaliesLogEntry][]. - This can be further sinked to Pub/Sub or any other services - supported by Cloud Logging. - """ - - class EmailAlertConfig(proto.Message): - r"""The config for email alert. - - Attributes: - user_emails (Sequence[str]): - The email addresses to send the alert. 
- """ - - user_emails = proto.RepeatedField( - proto.STRING, - number=1, - ) - - email_alert_config = proto.Field( - proto.MESSAGE, - number=1, - oneof='alert', - message=EmailAlertConfig, - ) - enable_logging = proto.Field( - proto.BOOL, - number=2, - ) - - -class ThresholdConfig(proto.Message): - r"""The config for feature monitoring threshold. - Next ID: 3 - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - value (float): - Specify a threshold value that can trigger - the alert. If this threshold config is for - feature distribution distance: 1. For - categorical feature, the distribution distance - is calculated by L-inifinity norm. - 2. For numerical feature, the distribution - distance is calculated by Jensen–Shannon - divergence. - Each feature must have a non-zero threshold if - they need to be monitored. Otherwise no alert - will be triggered for that feature. - - This field is a member of `oneof`_ ``threshold``. - """ - - value = proto.Field( - proto.DOUBLE, - number=1, - oneof='threshold', - ) - - -class SamplingStrategy(proto.Message): - r"""Sampling Strategy for logging, can be for both training and - prediction dataset. - Next ID: 2 - - Attributes: - random_sample_config (google.cloud.aiplatform_v1beta1.types.SamplingStrategy.RandomSampleConfig): - Random sample config. Will support more - sampling strategies later. - """ - - class RandomSampleConfig(proto.Message): - r"""Requests are randomly selected. 
- - Attributes: - sample_rate (float): - Sample rate (0, 1] - """ - - sample_rate = proto.Field( - proto.DOUBLE, - number=1, - ) - - random_sample_config = proto.Field( - proto.MESSAGE, - number=1, - message=RandomSampleConfig, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_service.py deleted file mode 100644 index 7062d1c84b..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/model_service.py +++ /dev/null @@ -1,571 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import io -from google.cloud.aiplatform_v1beta1.types import model as gca_model -from google.cloud.aiplatform_v1beta1.types import model_evaluation -from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice -from google.cloud.aiplatform_v1beta1.types import operation -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'UploadModelRequest', - 'UploadModelOperationMetadata', - 'UploadModelResponse', - 'GetModelRequest', - 'ListModelsRequest', - 'ListModelsResponse', - 'UpdateModelRequest', - 'DeleteModelRequest', - 'ExportModelRequest', - 'ExportModelOperationMetadata', - 'ExportModelResponse', - 'GetModelEvaluationRequest', - 'ListModelEvaluationsRequest', - 'ListModelEvaluationsResponse', - 'GetModelEvaluationSliceRequest', - 'ListModelEvaluationSlicesRequest', - 'ListModelEvaluationSlicesResponse', - }, -) - - -class UploadModelRequest(proto.Message): - r"""Request message for - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel]. - - Attributes: - parent (str): - Required. The resource name of the Location into which to - upload the Model. Format: - ``projects/{project}/locations/{location}`` - model (google.cloud.aiplatform_v1beta1.types.Model): - Required. The Model to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - model = proto.Field( - proto.MESSAGE, - number=2, - message=gca_model.Model, - ) - - -class UploadModelOperationMetadata(proto.Message): - r"""Details of - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] - operation. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The common part of the operation metadata. 
- """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class UploadModelResponse(proto.Message): - r"""Response message of - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] - operation. - - Attributes: - model (str): - The name of the uploaded Model resource. Format: - ``projects/{project}/locations/{location}/models/{model}`` - """ - - model = proto.Field( - proto.STRING, - number=1, - ) - - -class GetModelRequest(proto.Message): - r"""Request message for - [ModelService.GetModel][google.cloud.aiplatform.v1beta1.ModelService.GetModel]. - - Attributes: - name (str): - Required. The name of the Model resource. Format: - ``projects/{project}/locations/{location}/models/{model}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListModelsRequest(proto.Message): - r"""Request message for - [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels]. - - Attributes: - parent (str): - Required. The resource name of the Location to list the - Models from. Format: - ``projects/{project}/locations/{location}`` - filter (str): - An expression for filtering the results of the request. For - field names both snake_case and camelCase are supported. - - - ``model`` supports = and !=. ``model`` represents the - Model ID, i.e. the last segment of the Model's [resource - name][google.cloud.aiplatform.v1beta1.Model.name]. - - ``display_name`` supports = and != - - ``labels`` supports general map functions that is: - - - ``labels.key=value`` - key:value equality - - \`labels.key:\* or labels:key - key existence - - A key including a space must be quoted. - ``labels."a key"``. - - Some examples: - - - ``model=1234`` - - ``displayName="myDisplayName"`` - - ``labels.myKey="myValue"`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. 
Typically obtained via - [ListModelsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelsResponse.next_page_token] - of the previous - [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListModelsResponse(proto.Message): - r"""Response message for - [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels] - - Attributes: - models (Sequence[google.cloud.aiplatform_v1beta1.types.Model]): - List of Models in the requested page. - next_page_token (str): - A token to retrieve next page of results. Pass to - [ListModelsRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - models = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_model.Model, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateModelRequest(proto.Message): - r"""Request message for - [ModelService.UpdateModel][google.cloud.aiplatform.v1beta1.ModelService.UpdateModel]. - - Attributes: - model (google.cloud.aiplatform_v1beta1.types.Model): - Required. The Model which replaces the - resource on the server. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the resource. For the - ``FieldMask`` definition, see - [google.protobuf.FieldMask][google.protobuf.FieldMask]. 
- """ - - model = proto.Field( - proto.MESSAGE, - number=1, - message=gca_model.Model, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeleteModelRequest(proto.Message): - r"""Request message for - [ModelService.DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel]. - - Attributes: - name (str): - Required. The name of the Model resource to be deleted. - Format: - ``projects/{project}/locations/{location}/models/{model}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ExportModelRequest(proto.Message): - r"""Request message for - [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel]. - - Attributes: - name (str): - Required. The resource name of the Model to - export. - output_config (google.cloud.aiplatform_v1beta1.types.ExportModelRequest.OutputConfig): - Required. The desired output location and - configuration. - """ - - class OutputConfig(proto.Message): - r"""Output configuration for the Model export. - - Attributes: - export_format_id (str): - The ID of the format in which the Model must be exported. - Each Model lists the [export formats it - supports][google.cloud.aiplatform.v1beta1.Model.supported_export_formats]. - If no value is provided here, then the first from the list - of the Model's supported formats is used by default. - artifact_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination): - The Cloud Storage location where the Model artifact is to be - written to. Under the directory given as the destination a - new one with name - "``model-export--``", - where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 - format, will be created. Inside, the Model and any of its - supporting files will be written. This field should only be - set when the ``exportableContent`` field of the - [Model.supported_export_formats] object contains - ``ARTIFACT``. 
- image_destination (google.cloud.aiplatform_v1beta1.types.ContainerRegistryDestination): - The Google Container Registry or Artifact Registry uri where - the Model container image will be copied to. This field - should only be set when the ``exportableContent`` field of - the [Model.supported_export_formats] object contains - ``IMAGE``. - """ - - export_format_id = proto.Field( - proto.STRING, - number=1, - ) - artifact_destination = proto.Field( - proto.MESSAGE, - number=3, - message=io.GcsDestination, - ) - image_destination = proto.Field( - proto.MESSAGE, - number=4, - message=io.ContainerRegistryDestination, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - output_config = proto.Field( - proto.MESSAGE, - number=2, - message=OutputConfig, - ) - - -class ExportModelOperationMetadata(proto.Message): - r"""Details of - [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel] - operation. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The common part of the operation metadata. - output_info (google.cloud.aiplatform_v1beta1.types.ExportModelOperationMetadata.OutputInfo): - Output only. Information further describing - the output of this Model export. - """ - - class OutputInfo(proto.Message): - r"""Further describes the output of the ExportModel. Supplements - [ExportModelRequest.OutputConfig][google.cloud.aiplatform.v1beta1.ExportModelRequest.OutputConfig]. - - Attributes: - artifact_output_uri (str): - Output only. If the Model artifact is being - exported to Google Cloud Storage this is the - full path of the directory created, into which - the Model files are being written to. - image_output_uri (str): - Output only. If the Model image is being - exported to Google Container Registry or - Artifact Registry this is the full path of the - image created. 
- """ - - artifact_output_uri = proto.Field( - proto.STRING, - number=2, - ) - image_output_uri = proto.Field( - proto.STRING, - number=3, - ) - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - output_info = proto.Field( - proto.MESSAGE, - number=2, - message=OutputInfo, - ) - - -class ExportModelResponse(proto.Message): - r"""Response message of - [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel] - operation. - - """ - - -class GetModelEvaluationRequest(proto.Message): - r"""Request message for - [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation]. - - Attributes: - name (str): - Required. The name of the ModelEvaluation resource. Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListModelEvaluationsRequest(proto.Message): - r"""Request message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. - - Attributes: - parent (str): - Required. The resource name of the Model to list the - ModelEvaluations from. Format: - ``projects/{project}/locations/{location}/models/{model}`` - filter (str): - The standard list filter. - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListModelEvaluationsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationsResponse.next_page_token] - of the previous - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListModelEvaluationsResponse(proto.Message): - r"""Response message for - [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations]. - - Attributes: - model_evaluations (Sequence[google.cloud.aiplatform_v1beta1.types.ModelEvaluation]): - List of ModelEvaluations in the requested - page. - next_page_token (str): - A token to retrieve next page of results. Pass to - [ListModelEvaluationsRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - model_evaluations = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=model_evaluation.ModelEvaluation, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class GetModelEvaluationSliceRequest(proto.Message): - r"""Request message for - [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice]. - - Attributes: - name (str): - Required. The name of the ModelEvaluationSlice resource. - Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListModelEvaluationSlicesRequest(proto.Message): - r"""Request message for - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. - - Attributes: - parent (str): - Required. The resource name of the ModelEvaluation to list - the ModelEvaluationSlices from. 
Format: - ``projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`` - filter (str): - The standard list filter. - - - ``slice.dimension`` - for =. - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListModelEvaluationSlicesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationSlicesResponse.next_page_token] - of the previous - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListModelEvaluationSlicesResponse(proto.Message): - r"""Response message for - [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices]. - - Attributes: - model_evaluation_slices (Sequence[google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice]): - List of ModelEvaluations in the requested - page. - next_page_token (str): - A token to retrieve next page of results. Pass to - [ListModelEvaluationSlicesRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationSlicesRequest.page_token] - to obtain that page. 
- """ - - @property - def raw_page(self): - return self - - model_evaluation_slices = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=model_evaluation_slice.ModelEvaluationSlice, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/operation.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/operation.py deleted file mode 100644 index 0b21b7c0ea..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/operation.py +++ /dev/null @@ -1,83 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'GenericOperationMetadata', - 'DeleteOperationMetadata', - }, -) - - -class GenericOperationMetadata(proto.Message): - r"""Generic Metadata shared by all operations. - - Attributes: - partial_failures (Sequence[google.rpc.status_pb2.Status]): - Output only. Partial failures encountered. - E.g. single files that couldn't be read. - This field should never exceed 20 entries. - Status details field will contain standard GCP - error details. 
- create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the operation was - created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the operation was - updated for the last time. If the operation has - finished (successfully or not), this is the - finish time. - """ - - partial_failures = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=status_pb2.Status, - ) - create_time = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -class DeleteOperationMetadata(proto.Message): - r"""Details of operations that perform deletes of any entities. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The common part of the operation metadata. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message='GenericOperationMetadata', - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_job.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_job.py deleted file mode 100644 index a4e42a70a4..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_job.py +++ /dev/null @@ -1,518 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import artifact -from google.cloud.aiplatform_v1beta1.types import context -from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1beta1.types import execution as gca_execution -from google.cloud.aiplatform_v1beta1.types import pipeline_state -from google.cloud.aiplatform_v1beta1.types import value as gca_value -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'PipelineJob', - 'PipelineJobDetail', - 'PipelineTaskDetail', - 'PipelineTaskExecutorDetail', - }, -) - - -class PipelineJob(proto.Message): - r"""An instance of a machine learning PipelineJob. - - Attributes: - name (str): - Output only. The resource name of the - PipelineJob. - display_name (str): - The display name of the Pipeline. - The name can be up to 128 characters long and - can be consist of any UTF-8 characters. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Pipeline creation time. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Pipeline start time. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Pipeline end time. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this PipelineJob - was most recently updated. - pipeline_spec (google.protobuf.struct_pb2.Struct): - Required. The spec of the pipeline. - state (google.cloud.aiplatform_v1beta1.types.PipelineState): - Output only. The detailed state of the job. - job_detail (google.cloud.aiplatform_v1beta1.types.PipelineJobDetail): - Output only. The details of pipeline run. 
Not - available in the list view. - error (google.rpc.status_pb2.Status): - Output only. The error that occurred during - pipeline execution. Only populated when the - pipeline's state is FAILED or CANCELLED. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineJob.LabelsEntry]): - The labels with user-defined metadata to - organize PipelineJob. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. - See https://goo.gl/xmQnxf for more information - and examples of labels. - runtime_config (google.cloud.aiplatform_v1beta1.types.PipelineJob.RuntimeConfig): - Runtime config of the pipeline. - encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): - Customer-managed encryption key spec for a - pipelineJob. If set, this PipelineJob and all of - its sub-resources will be secured by this key. - service_account (str): - The service account that the pipeline workload runs as. If - not specified, the Compute Engine default service account in - the project will be used. See - https://cloud.google.com/compute/docs/access/service-accounts#default_service_account - - Users starting the pipeline must have the - ``iam.serviceAccounts.actAs`` permission on this service - account. - network (str): - The full name of the Compute Engine - `network `__ - to which the Pipeline Job's workload should be peered. For - example, ``projects/12345/global/networks/myVPC``. - `Format `__ - is of the form - ``projects/{project}/global/networks/{network}``. Where - {project} is a project number, as in ``12345``, and - {network} is a network name. - - Private services access must already be configured for the - network. Pipeline job will apply the network configuration - to the GCP resources being launched, if applied, such as - Vertex AI Training or Dataflow job. 
If left unspecified, the - workload is not peered with any network. - """ - - class RuntimeConfig(proto.Message): - r"""The runtime config of a PipelineJob. - - Attributes: - parameters (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineJob.RuntimeConfig.ParametersEntry]): - Deprecated. Use - [RuntimeConfig.parameter_values][google.cloud.aiplatform.v1beta1.PipelineJob.RuntimeConfig.parameter_values] - instead. The runtime parameters of the PipelineJob. The - parameters will be passed into - [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec] - to replace the placeholders at runtime. This field is used - by pipelines built using - ``PipelineJob.pipeline_spec.schema_version`` 2.0.0 or lower, - such as pipelines built using Kubeflow Pipelines SDK 1.8 or - lower. - gcs_output_directory (str): - Required. A path in a Cloud Storage bucket, which will be - treated as the root output directory of the pipeline. It is - used by the system to generate the paths of output - artifacts. The artifact paths are generated with a sub-path - pattern ``{job_id}/{task_id}/{output_key}`` under the - specified output directory. The service account specified in - this pipeline must have the ``storage.objects.get`` and - ``storage.objects.create`` permissions for this bucket. - parameter_values (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineJob.RuntimeConfig.ParameterValuesEntry]): - The runtime parameters of the PipelineJob. The parameters - will be passed into - [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec] - to replace the placeholders at runtime. This field is used - by pipelines built using - ``PipelineJob.pipeline_spec.schema_version`` 2.1.0, such as - pipelines built using Kubeflow Pipelines SDK 1.9 or higher - and the v2 DSL. 
- """ - - parameters = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=1, - message=gca_value.Value, - ) - gcs_output_directory = proto.Field( - proto.STRING, - number=2, - ) - parameter_values = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=3, - message=struct_pb2.Value, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - create_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - start_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - pipeline_spec = proto.Field( - proto.MESSAGE, - number=7, - message=struct_pb2.Struct, - ) - state = proto.Field( - proto.ENUM, - number=8, - enum=pipeline_state.PipelineState, - ) - job_detail = proto.Field( - proto.MESSAGE, - number=9, - message='PipelineJobDetail', - ) - error = proto.Field( - proto.MESSAGE, - number=10, - message=status_pb2.Status, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=11, - ) - runtime_config = proto.Field( - proto.MESSAGE, - number=12, - message=RuntimeConfig, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=16, - message=gca_encryption_spec.EncryptionSpec, - ) - service_account = proto.Field( - proto.STRING, - number=17, - ) - network = proto.Field( - proto.STRING, - number=18, - ) - - -class PipelineJobDetail(proto.Message): - r"""The runtime detail of PipelineJob. - - Attributes: - pipeline_context (google.cloud.aiplatform_v1beta1.types.Context): - Output only. The context of the pipeline. - pipeline_run_context (google.cloud.aiplatform_v1beta1.types.Context): - Output only. The context of the current - pipeline run. 
- task_details (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail]): - Output only. The runtime details of the tasks - under the pipeline. - """ - - pipeline_context = proto.Field( - proto.MESSAGE, - number=1, - message=context.Context, - ) - pipeline_run_context = proto.Field( - proto.MESSAGE, - number=2, - message=context.Context, - ) - task_details = proto.RepeatedField( - proto.MESSAGE, - number=3, - message='PipelineTaskDetail', - ) - - -class PipelineTaskDetail(proto.Message): - r"""The runtime detail of a task execution. - - Attributes: - task_id (int): - Output only. The system generated ID of the - task. - parent_task_id (int): - Output only. The id of the parent task if the - task is within a component scope. Empty if the - task is at the root level. - task_name (str): - Output only. The user specified name of the task that is - defined in [PipelineJob.spec][]. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Task create time. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Task start time. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Task end time. - executor_detail (google.cloud.aiplatform_v1beta1.types.PipelineTaskExecutorDetail): - Output only. The detailed execution info. - state (google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail.State): - Output only. State of the task. - execution (google.cloud.aiplatform_v1beta1.types.Execution): - Output only. The execution metadata of the - task. - error (google.rpc.status_pb2.Status): - Output only. The error that occurred during - task execution. Only populated when the task's - state is FAILED or CANCELLED. - pipeline_task_status (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail.PipelineTaskStatus]): - Output only. A list of task status. This - field keeps a record of task status evolving - over time. 
- inputs (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail.InputsEntry]): - Output only. The runtime input artifacts of - the task. - outputs (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail.OutputsEntry]): - Output only. The runtime output artifacts of - the task. - """ - class State(proto.Enum): - r"""Specifies state of TaskExecution""" - STATE_UNSPECIFIED = 0 - PENDING = 1 - RUNNING = 2 - SUCCEEDED = 3 - CANCEL_PENDING = 4 - CANCELLING = 5 - CANCELLED = 6 - FAILED = 7 - SKIPPED = 8 - NOT_TRIGGERED = 9 - - class PipelineTaskStatus(proto.Message): - r"""A single record of the task status. - - Attributes: - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Update time of this status. - state (google.cloud.aiplatform_v1beta1.types.PipelineTaskDetail.State): - Output only. The state of the task. - error (google.rpc.status_pb2.Status): - Output only. The error that occurred during - the state. May be set when the state is any of - the non-final state (PENDING/RUNNING/CANCELLING) - or FAILED state. If the state is FAILED, the - error here is final and not going to be retried. - If the state is a non-final state, the error - indicates a system-error being retried. - """ - - update_time = proto.Field( - proto.MESSAGE, - number=1, - message=timestamp_pb2.Timestamp, - ) - state = proto.Field( - proto.ENUM, - number=2, - enum='PipelineTaskDetail.State', - ) - error = proto.Field( - proto.MESSAGE, - number=3, - message=status_pb2.Status, - ) - - class ArtifactList(proto.Message): - r"""A list of artifact metadata. - - Attributes: - artifacts (Sequence[google.cloud.aiplatform_v1beta1.types.Artifact]): - Output only. A list of artifact metadata. 
- """ - - artifacts = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=artifact.Artifact, - ) - - task_id = proto.Field( - proto.INT64, - number=1, - ) - parent_task_id = proto.Field( - proto.INT64, - number=12, - ) - task_name = proto.Field( - proto.STRING, - number=2, - ) - create_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - start_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - executor_detail = proto.Field( - proto.MESSAGE, - number=6, - message='PipelineTaskExecutorDetail', - ) - state = proto.Field( - proto.ENUM, - number=7, - enum=State, - ) - execution = proto.Field( - proto.MESSAGE, - number=8, - message=gca_execution.Execution, - ) - error = proto.Field( - proto.MESSAGE, - number=9, - message=status_pb2.Status, - ) - pipeline_task_status = proto.RepeatedField( - proto.MESSAGE, - number=13, - message=PipelineTaskStatus, - ) - inputs = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=10, - message=ArtifactList, - ) - outputs = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=11, - message=ArtifactList, - ) - - -class PipelineTaskExecutorDetail(proto.Message): - r"""The runtime detail of a pipeline executor. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - container_detail (google.cloud.aiplatform_v1beta1.types.PipelineTaskExecutorDetail.ContainerDetail): - Output only. The detailed info for a - container executor. - - This field is a member of `oneof`_ ``details``. 
- custom_job_detail (google.cloud.aiplatform_v1beta1.types.PipelineTaskExecutorDetail.CustomJobDetail): - Output only. The detailed info for a custom - job executor. - - This field is a member of `oneof`_ ``details``. - """ - - class ContainerDetail(proto.Message): - r"""The detail of a container execution. It contains the job - names of the lifecycle of a container execution. - - Attributes: - main_job (str): - Output only. The name of the - [CustomJob][google.cloud.aiplatform.v1beta1.CustomJob] for - the main container execution. - pre_caching_check_job (str): - Output only. The name of the - [CustomJob][google.cloud.aiplatform.v1beta1.CustomJob] for - the pre-caching-check container execution. This job will be - available if the - [PipelineJob.pipeline_spec][google.cloud.aiplatform.v1beta1.PipelineJob.pipeline_spec] - specifies the ``pre_caching_check`` hook in the lifecycle - events. - """ - - main_job = proto.Field( - proto.STRING, - number=1, - ) - pre_caching_check_job = proto.Field( - proto.STRING, - number=2, - ) - - class CustomJobDetail(proto.Message): - r"""The detailed info for a custom job executor. - - Attributes: - job (str): - Output only. The name of the - [CustomJob][google.cloud.aiplatform.v1beta1.CustomJob]. 
- """ - - job = proto.Field( - proto.STRING, - number=1, - ) - - container_detail = proto.Field( - proto.MESSAGE, - number=1, - oneof='details', - message=ContainerDetail, - ) - custom_job_detail = proto.Field( - proto.MESSAGE, - number=2, - oneof='details', - message=CustomJobDetail, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_service.py deleted file mode 100644 index 18a09996d5..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_service.py +++ /dev/null @@ -1,412 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job -from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'CreateTrainingPipelineRequest', - 'GetTrainingPipelineRequest', - 'ListTrainingPipelinesRequest', - 'ListTrainingPipelinesResponse', - 'DeleteTrainingPipelineRequest', - 'CancelTrainingPipelineRequest', - 'CreatePipelineJobRequest', - 'GetPipelineJobRequest', - 'ListPipelineJobsRequest', - 'ListPipelineJobsResponse', - 'DeletePipelineJobRequest', - 'CancelPipelineJobRequest', - }, -) - - -class CreateTrainingPipelineRequest(proto.Message): - r"""Request message for - [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - TrainingPipeline in. Format: - ``projects/{project}/locations/{location}`` - training_pipeline (google.cloud.aiplatform_v1beta1.types.TrainingPipeline): - Required. The TrainingPipeline to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - training_pipeline = proto.Field( - proto.MESSAGE, - number=2, - message=gca_training_pipeline.TrainingPipeline, - ) - - -class GetTrainingPipelineRequest(proto.Message): - r"""Request message for - [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline]. - - Attributes: - name (str): - Required. The name of the TrainingPipeline resource. 
Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListTrainingPipelinesRequest(proto.Message): - r"""Request message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines]. - - Attributes: - parent (str): - Required. The resource name of the Location to list the - TrainingPipelines from. Format: - ``projects/{project}/locations/{location}`` - filter (str): - The standard list filter. Supported fields: - - - ``display_name`` supports = and !=. - - - ``state`` supports = and !=. - - Some examples of using the filter are: - - - ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"`` - - - ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"`` - - - ``NOT display_name="my_pipeline"`` - - - ``state="PIPELINE_STATE_FAILED"`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListTrainingPipelinesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListTrainingPipelinesResponse.next_page_token] - of the previous - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines] - call. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=5, - message=field_mask_pb2.FieldMask, - ) - - -class ListTrainingPipelinesResponse(proto.Message): - r"""Response message for - [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines] - - Attributes: - training_pipelines (Sequence[google.cloud.aiplatform_v1beta1.types.TrainingPipeline]): - List of TrainingPipelines in the requested - page. - next_page_token (str): - A token to retrieve the next page of results. Pass to - [ListTrainingPipelinesRequest.page_token][google.cloud.aiplatform.v1beta1.ListTrainingPipelinesRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - training_pipelines = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_training_pipeline.TrainingPipeline, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteTrainingPipelineRequest(proto.Message): - r"""Request message for - [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline]. - - Attributes: - name (str): - Required. The name of the TrainingPipeline resource to be - deleted. Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CancelTrainingPipelineRequest(proto.Message): - r"""Request message for - [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline]. - - Attributes: - name (str): - Required. The name of the TrainingPipeline to cancel. 
- Format: - ``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CreatePipelineJobRequest(proto.Message): - r"""Request message for - [PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CreatePipelineJob]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - PipelineJob in. Format: - ``projects/{project}/locations/{location}`` - pipeline_job (google.cloud.aiplatform_v1beta1.types.PipelineJob): - Required. The PipelineJob to create. - pipeline_job_id (str): - The ID to use for the PipelineJob, which will become the - final component of the PipelineJob name. If not provided, an - ID will be automatically generated. - - This value should be less than 128 characters, and valid - characters are /[a-z][0-9]-/. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - pipeline_job = proto.Field( - proto.MESSAGE, - number=2, - message=gca_pipeline_job.PipelineJob, - ) - pipeline_job_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetPipelineJobRequest(proto.Message): - r"""Request message for - [PipelineService.GetPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob]. - - Attributes: - name (str): - Required. The name of the PipelineJob resource. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListPipelineJobsRequest(proto.Message): - r"""Request message for - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs]. - - Attributes: - parent (str): - Required. The resource name of the Location to list the - PipelineJobs from. Format: - ``projects/{project}/locations/{location}`` - filter (str): - Lists the PipelineJobs that match the filter expression. 
The - following fields are supported: - - - ``pipeline_name``: Supports ``=`` and ``!=`` comparisons. - - ``display_name``: Supports ``=``, ``!=`` comparisons, and - ``:`` wildcard. - - ``pipeline_job_user_id``: Supports ``=``, ``!=`` - comparisons, and ``:`` wildcard. for example, can check - if pipeline's display_name contains *step* by doing - display_name:"*step*" - - ``state``: Supports ``=`` and ``!=`` comparisons. - - ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``end_time``: Supports ``=``, ``!=``, ``<``, ``>``, - ``<=``, and ``>=`` comparisons. Values must be in RFC - 3339 format. - - ``labels``: Supports key-value equality and key presence. - - Filter expressions can be combined together using logical - operators (``AND`` & ``OR``). For example: - ``pipeline_name="test" AND create_time>"2020-05-18T13:30:00Z"``. - - The syntax to define filter expression is based on - https://google.aip.dev/160. - - Examples: - - - ``create_time>"2021-05-18T00:00:00Z" OR update_time>"2020-05-18T00:00:00Z"`` - PipelineJobs created or updated after 2020-05-18 00:00:00 - UTC. - - ``labels.env = "prod"`` PipelineJobs with label "env" set - to "prod". - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained via - [ListPipelineJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListPipelineJobsResponse.next_page_token] - of the previous - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs] - call. - order_by (str): - A comma-separated list of fields to order by. The default - sort order is in ascending order. Use "desc" after a field - name for descending. You can have multiple order_by fields - provided e.g. 
"create_time desc, end_time", "end_time, - start_time, update_time" For example, using "create_time - desc, end_time" will order results by create time in - descending order, and if there are multiple jobs having the - same create time, order them by the end time in ascending - order. if order_by is not specified, it will order by - default order is create time in descending order. Supported - fields: - - - ``create_time`` - - ``update_time`` - - ``end_time`` - - ``start_time`` - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - order_by = proto.Field( - proto.STRING, - number=6, - ) - - -class ListPipelineJobsResponse(proto.Message): - r"""Response message for - [PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs] - - Attributes: - pipeline_jobs (Sequence[google.cloud.aiplatform_v1beta1.types.PipelineJob]): - List of PipelineJobs in the requested page. - next_page_token (str): - A token to retrieve the next page of results. Pass to - [ListPipelineJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListPipelineJobsRequest.page_token] - to obtain that page. - """ - - @property - def raw_page(self): - return self - - pipeline_jobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_pipeline_job.PipelineJob, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeletePipelineJobRequest(proto.Message): - r"""Request message for - [PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.DeletePipelineJob]. - - Attributes: - name (str): - Required. The name of the PipelineJob resource to be - deleted. 
Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CancelPipelineJobRequest(proto.Message): - r"""Request message for - [PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1beta1.PipelineService.CancelPipelineJob]. - - Attributes: - name (str): - Required. The name of the PipelineJob to cancel. Format: - ``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_state.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_state.py deleted file mode 100644 index 83459cab69..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/pipeline_state.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'PipelineState', - }, -) - - -class PipelineState(proto.Enum): - r"""Describes the state of a pipeline.""" - PIPELINE_STATE_UNSPECIFIED = 0 - PIPELINE_STATE_QUEUED = 1 - PIPELINE_STATE_PENDING = 2 - PIPELINE_STATE_RUNNING = 3 - PIPELINE_STATE_SUCCEEDED = 4 - PIPELINE_STATE_FAILED = 5 - PIPELINE_STATE_CANCELLING = 6 - PIPELINE_STATE_CANCELLED = 7 - PIPELINE_STATE_PAUSED = 8 - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/prediction_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/prediction_service.py deleted file mode 100644 index 82f5f9e052..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/prediction_service.py +++ /dev/null @@ -1,276 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.api import httpbody_pb2 # type: ignore -from google.cloud.aiplatform_v1beta1.types import explanation -from google.protobuf import struct_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'PredictRequest', - 'PredictResponse', - 'RawPredictRequest', - 'ExplainRequest', - 'ExplainResponse', - }, -) - - -class PredictRequest(proto.Message): - r"""Request message for - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. - - Attributes: - endpoint (str): - Required. The name of the Endpoint requested to serve the - prediction. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - instances (Sequence[google.protobuf.struct_pb2.Value]): - Required. The instances that are the input to the prediction - call. A DeployedModel may have an upper limit on the number - of instances it supports per request, and when it is - exceeded the prediction call errors in case of AutoML - Models, or, in case of customer created Models, the - behaviour is as documented by that Model. The schema of any - single instance may be specified via Endpoint's - DeployedModels' - [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. - parameters (google.protobuf.struct_pb2.Value): - The parameters that govern the prediction. The schema of the - parameters may be specified via Endpoint's DeployedModels' - [Model's - ][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. 
- """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - instances = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=struct_pb2.Value, - ) - parameters = proto.Field( - proto.MESSAGE, - number=3, - message=struct_pb2.Value, - ) - - -class PredictResponse(proto.Message): - r"""Response message for - [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict]. - - Attributes: - predictions (Sequence[google.protobuf.struct_pb2.Value]): - The predictions that are the output of the predictions call. - The schema of any single prediction may be specified via - Endpoint's DeployedModels' [Model's - ][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri]. - deployed_model_id (str): - ID of the Endpoint's DeployedModel that - served this prediction. - model (str): - Output only. The resource name of the Model - which is deployed as the DeployedModel that this - prediction hits. - model_display_name (str): - Output only. The [display - name][google.cloud.aiplatform.v1beta1.Model.display_name] of - the Model which is deployed as the DeployedModel that this - prediction hits. - """ - - predictions = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=struct_pb2.Value, - ) - deployed_model_id = proto.Field( - proto.STRING, - number=2, - ) - model = proto.Field( - proto.STRING, - number=3, - ) - model_display_name = proto.Field( - proto.STRING, - number=4, - ) - - -class RawPredictRequest(proto.Message): - r"""Request message for - [PredictionService.RawPredict][google.cloud.aiplatform.v1beta1.PredictionService.RawPredict]. - - Attributes: - endpoint (str): - Required. The name of the Endpoint requested to serve the - prediction. 
Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - http_body (google.api.httpbody_pb2.HttpBody): - The prediction input. Supports HTTP headers and arbitrary - data payload. - - A - [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel] - may have an upper limit on the number of instances it - supports per request. When this limit it is exceeded for an - AutoML model, the - [RawPredict][google.cloud.aiplatform.v1beta1.PredictionService.RawPredict] - method returns an error. When this limit is exceeded for a - custom-trained model, the behavior varies depending on the - model. - - You can specify the schema for each instance in the - [predict_schemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] - field when you create a - [Model][google.cloud.aiplatform.v1beta1.Model]. This schema - applies when you deploy the ``Model`` as a ``DeployedModel`` - to an [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] - and use the ``RawPredict`` method. - """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - http_body = proto.Field( - proto.MESSAGE, - number=2, - message=httpbody_pb2.HttpBody, - ) - - -class ExplainRequest(proto.Message): - r"""Request message for - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. - - Attributes: - endpoint (str): - Required. The name of the Endpoint requested to serve the - explanation. Format: - ``projects/{project}/locations/{location}/endpoints/{endpoint}`` - instances (Sequence[google.protobuf.struct_pb2.Value]): - Required. The instances that are the input to the - explanation call. A DeployedModel may have an upper limit on - the number of instances it supports per request, and when it - is exceeded the explanation call errors in case of AutoML - Models, or, in case of customer created Models, the - behaviour is as documented by that Model. 
The schema of any - single instance may be specified via Endpoint's - DeployedModels' - [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. - parameters (google.protobuf.struct_pb2.Value): - The parameters that govern the prediction. The schema of the - parameters may be specified via Endpoint's DeployedModels' - [Model's - ][google.cloud.aiplatform.v1beta1.DeployedModel.model] - [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] - [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. - explanation_spec_override (google.cloud.aiplatform_v1beta1.types.ExplanationSpecOverride): - If specified, overrides the - [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] - of the DeployedModel. Can be used for explaining prediction - results with different configurations, such as: - - - Explaining top-5 predictions results as opposed to top-1; - - Increasing path count or step count of the attribution - methods to reduce approximate errors; - - Using different baselines for explaining the prediction - results. - deployed_model_id (str): - If specified, this ExplainRequest will be served by the - chosen DeployedModel, overriding - [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split]. 
- """ - - endpoint = proto.Field( - proto.STRING, - number=1, - ) - instances = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=struct_pb2.Value, - ) - parameters = proto.Field( - proto.MESSAGE, - number=4, - message=struct_pb2.Value, - ) - explanation_spec_override = proto.Field( - proto.MESSAGE, - number=5, - message=explanation.ExplanationSpecOverride, - ) - deployed_model_id = proto.Field( - proto.STRING, - number=3, - ) - - -class ExplainResponse(proto.Message): - r"""Response message for - [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. - - Attributes: - explanations (Sequence[google.cloud.aiplatform_v1beta1.types.Explanation]): - The explanations of the Model's - [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions]. - - It has the same number of elements as - [instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] - to be explained. - deployed_model_id (str): - ID of the Endpoint's DeployedModel that - served this explanation. - predictions (Sequence[google.protobuf.struct_pb2.Value]): - The predictions that are the output of the predictions call. - Same as - [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions]. 
- """ - - explanations = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=explanation.Explanation, - ) - deployed_model_id = proto.Field( - proto.STRING, - number=2, - ) - predictions = proto.RepeatedField( - proto.MESSAGE, - number=3, - message=struct_pb2.Value, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool.py deleted file mode 100644 index 61589d34c7..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool.py +++ /dev/null @@ -1,86 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'SpecialistPool', - }, -) - - -class SpecialistPool(proto.Message): - r"""SpecialistPool represents customers' own workforce to work on - their data labeling jobs. It includes a group of specialist - managers and workers. Managers are responsible for managing the - workers in this pool as well as customers' data labeling jobs - associated with this pool. Customers create specialist pool as - well as start data labeling jobs on Cloud, managers and workers - handle the jobs using CrowdCompute console. - - Attributes: - name (str): - Required. 
The resource name of the - SpecialistPool. - display_name (str): - Required. The user-defined name of the - SpecialistPool. The name can be up to 128 - characters long and can be consist of any UTF-8 - characters. - This field should be unique on project-level. - specialist_managers_count (int): - Output only. The number of managers in this - SpecialistPool. - specialist_manager_emails (Sequence[str]): - The email addresses of the managers in the - SpecialistPool. - pending_data_labeling_jobs (Sequence[str]): - Output only. The resource name of the pending - data labeling jobs. - specialist_worker_emails (Sequence[str]): - The email addresses of workers in the - SpecialistPool. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - specialist_managers_count = proto.Field( - proto.INT32, - number=3, - ) - specialist_manager_emails = proto.RepeatedField( - proto.STRING, - number=4, - ) - pending_data_labeling_jobs = proto.RepeatedField( - proto.STRING, - number=5, - ) - specialist_worker_emails = proto.RepeatedField( - proto.STRING, - number=7, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py deleted file mode 100644 index c6ebb83779..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/specialist_pool_service.py +++ /dev/null @@ -1,237 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import operation -from google.cloud.aiplatform_v1beta1.types import specialist_pool as gca_specialist_pool -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'CreateSpecialistPoolRequest', - 'CreateSpecialistPoolOperationMetadata', - 'GetSpecialistPoolRequest', - 'ListSpecialistPoolsRequest', - 'ListSpecialistPoolsResponse', - 'DeleteSpecialistPoolRequest', - 'UpdateSpecialistPoolRequest', - 'UpdateSpecialistPoolOperationMetadata', - }, -) - - -class CreateSpecialistPoolRequest(proto.Message): - r"""Request message for - [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool]. - - Attributes: - parent (str): - Required. The parent Project name for the new - SpecialistPool. The form is - ``projects/{project}/locations/{location}``. - specialist_pool (google.cloud.aiplatform_v1beta1.types.SpecialistPool): - Required. The SpecialistPool to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - specialist_pool = proto.Field( - proto.MESSAGE, - number=2, - message=gca_specialist_pool.SpecialistPool, - ) - - -class CreateSpecialistPoolOperationMetadata(proto.Message): - r"""Runtime operation information for - [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool]. 
- - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The operation generic information. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class GetSpecialistPoolRequest(proto.Message): - r"""Request message for - [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool]. - - Attributes: - name (str): - Required. The name of the SpecialistPool resource. The form - is - ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListSpecialistPoolsRequest(proto.Message): - r"""Request message for - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. - - Attributes: - parent (str): - Required. The name of the SpecialistPool's parent resource. - Format: ``projects/{project}/locations/{location}`` - page_size (int): - The standard list page size. - page_token (str): - The standard list page token. Typically obtained by - [ListSpecialistPoolsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListSpecialistPoolsResponse.next_page_token] - of the previous - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools] - call. Return first page if empty. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. 
- FieldMask represents a set of - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_size = proto.Field( - proto.INT32, - number=2, - ) - page_token = proto.Field( - proto.STRING, - number=3, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=4, - message=field_mask_pb2.FieldMask, - ) - - -class ListSpecialistPoolsResponse(proto.Message): - r"""Response message for - [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools]. - - Attributes: - specialist_pools (Sequence[google.cloud.aiplatform_v1beta1.types.SpecialistPool]): - A list of SpecialistPools that matches the - specified filter in the request. - next_page_token (str): - The standard List next-page token. - """ - - @property - def raw_page(self): - return self - - specialist_pools = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_specialist_pool.SpecialistPool, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteSpecialistPoolRequest(proto.Message): - r"""Request message for - [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool]. - - Attributes: - name (str): - Required. The resource name of the SpecialistPool to delete. - Format: - ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}`` - force (bool): - If set to true, any specialist managers in - this SpecialistPool will also be deleted. - (Otherwise, the request will only work if the - SpecialistPool has no specialist managers.) - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - force = proto.Field( - proto.BOOL, - number=2, - ) - - -class UpdateSpecialistPoolRequest(proto.Message): - r"""Request message for - [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool]. 
- - Attributes: - specialist_pool (google.cloud.aiplatform_v1beta1.types.SpecialistPool): - Required. The SpecialistPool which replaces - the resource on the server. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The update mask applies to the - resource. - """ - - specialist_pool = proto.Field( - proto.MESSAGE, - number=1, - message=gca_specialist_pool.SpecialistPool, - ) - update_mask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class UpdateSpecialistPoolOperationMetadata(proto.Message): - r"""Runtime operation metadata for - [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool]. - - Attributes: - specialist_pool (str): - Output only. The name of the SpecialistPool to which the - specialists are being added. Format: - ``projects/{project_id}/locations/{location_id}/specialistPools/{specialist_pool}`` - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - The operation generic information. - """ - - specialist_pool = proto.Field( - proto.STRING, - number=1, - ) - generic_metadata = proto.Field( - proto.MESSAGE, - number=2, - message=operation.GenericOperationMetadata, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/study.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/study.py deleted file mode 100644 index 22f36b56b1..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/study.py +++ /dev/null @@ -1,882 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'Study', - 'Trial', - 'StudySpec', - 'Measurement', - }, -) - - -class Study(proto.Message): - r"""LINT.IfChange - A message representing a Study. - - Attributes: - name (str): - Output only. The name of a study. The study's globally - unique identifier. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - display_name (str): - Required. Describes the Study, default value - is empty string. - study_spec (google.cloud.aiplatform_v1beta1.types.StudySpec): - Required. Configuration of the Study. - state (google.cloud.aiplatform_v1beta1.types.Study.State): - Output only. The detailed state of a Study. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time at which the study was - created. - inactive_reason (str): - Output only. A human readable reason why the - Study is inactive. This should be empty if a - study is ACTIVE or COMPLETED. 
- """ - class State(proto.Enum): - r"""Describes the Study state.""" - STATE_UNSPECIFIED = 0 - ACTIVE = 1 - INACTIVE = 2 - COMPLETED = 3 - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - study_spec = proto.Field( - proto.MESSAGE, - number=3, - message='StudySpec', - ) - state = proto.Field( - proto.ENUM, - number=4, - enum=State, - ) - create_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - inactive_reason = proto.Field( - proto.STRING, - number=6, - ) - - -class Trial(proto.Message): - r"""A message representing a Trial. A Trial contains a unique set - of Parameters that has been or will be evaluated, along with the - objective metrics got by running the Trial. - - Attributes: - name (str): - Output only. Resource name of the Trial - assigned by the service. - id (str): - Output only. The identifier of the Trial - assigned by the service. - state (google.cloud.aiplatform_v1beta1.types.Trial.State): - Output only. The detailed state of the Trial. - parameters (Sequence[google.cloud.aiplatform_v1beta1.types.Trial.Parameter]): - Output only. The parameters of the Trial. - final_measurement (google.cloud.aiplatform_v1beta1.types.Measurement): - Output only. The final measurement containing - the objective value. - measurements (Sequence[google.cloud.aiplatform_v1beta1.types.Measurement]): - Output only. A list of measurements that are strictly - lexicographically ordered by their induced tuples (steps, - elapsed_duration). These are used for early stopping - computations. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the Trial was started. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the Trial's status changed to - ``SUCCEEDED`` or ``INFEASIBLE``. - client_id (str): - Output only. The identifier of the client that originally - requested this Trial. 
Each client is identified by a unique - client_id. When a client asks for a suggestion, Vertex AI - Vizier will assign it a Trial. The client should evaluate - the Trial, complete it, and report back to Vertex AI Vizier. - If suggestion is asked again by same client_id before the - Trial is completed, the same Trial will be returned. - Multiple clients with different client_ids can ask for - suggestions simultaneously, each of them will get their own - Trial. - infeasible_reason (str): - Output only. A human readable string describing why the - Trial is infeasible. This is set only if Trial state is - ``INFEASIBLE``. - custom_job (str): - Output only. The CustomJob name linked to the - Trial. It's set for a HyperparameterTuningJob's - Trial. - web_access_uris (Sequence[google.cloud.aiplatform_v1beta1.types.Trial.WebAccessUrisEntry]): - Output only. URIs for accessing `interactive - shells `__ - (one URI for each training node). Only available if this - trial is part of a - [HyperparameterTuningJob][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob] - and the job's - [trial_job_spec.enable_web_access][google.cloud.aiplatform.v1beta1.CustomJobSpec.enable_web_access] - field is ``true``. - - The keys are names of each node used for the trial; for - example, ``workerpool0-0`` for the primary node, - ``workerpool1-0`` for the first node in the second worker - pool, and ``workerpool1-1`` for the second node in the - second worker pool. - - The values are the URIs for each node's interactive shell. - """ - class State(proto.Enum): - r"""Describes a Trial state.""" - STATE_UNSPECIFIED = 0 - REQUESTED = 1 - ACTIVE = 2 - STOPPING = 3 - SUCCEEDED = 4 - INFEASIBLE = 5 - - class Parameter(proto.Message): - r"""A message representing a parameter to be tuned. - - Attributes: - parameter_id (str): - Output only. The ID of the parameter. The parameter should - be defined in [StudySpec's - Parameters][google.cloud.aiplatform.v1beta1.StudySpec.parameters]. 
- value (google.protobuf.struct_pb2.Value): - Output only. The value of the parameter. ``number_value`` - will be set if a parameter defined in StudySpec is in type - 'INTEGER', 'DOUBLE' or 'DISCRETE'. ``string_value`` will be - set if a parameter defined in StudySpec is in type - 'CATEGORICAL'. - """ - - parameter_id = proto.Field( - proto.STRING, - number=1, - ) - value = proto.Field( - proto.MESSAGE, - number=2, - message=struct_pb2.Value, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - id = proto.Field( - proto.STRING, - number=2, - ) - state = proto.Field( - proto.ENUM, - number=3, - enum=State, - ) - parameters = proto.RepeatedField( - proto.MESSAGE, - number=4, - message=Parameter, - ) - final_measurement = proto.Field( - proto.MESSAGE, - number=5, - message='Measurement', - ) - measurements = proto.RepeatedField( - proto.MESSAGE, - number=6, - message='Measurement', - ) - start_time = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=8, - message=timestamp_pb2.Timestamp, - ) - client_id = proto.Field( - proto.STRING, - number=9, - ) - infeasible_reason = proto.Field( - proto.STRING, - number=10, - ) - custom_job = proto.Field( - proto.STRING, - number=11, - ) - web_access_uris = proto.MapField( - proto.STRING, - proto.STRING, - number=12, - ) - - -class StudySpec(proto.Message): - r"""Represents specification of a Study. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - decay_curve_stopping_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.DecayCurveAutomatedStoppingSpec): - The automated early stopping spec using decay - curve rule. 
- - This field is a member of `oneof`_ ``automated_stopping_spec``. - median_automated_stopping_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.MedianAutomatedStoppingSpec): - The automated early stopping spec using - median rule. - - This field is a member of `oneof`_ ``automated_stopping_spec``. - convex_stop_config (google.cloud.aiplatform_v1beta1.types.StudySpec.ConvexStopConfig): - Deprecated. - The automated early stopping using convex - stopping rule. - - This field is a member of `oneof`_ ``automated_stopping_spec``. - metrics (Sequence[google.cloud.aiplatform_v1beta1.types.StudySpec.MetricSpec]): - Required. Metric specs for the Study. - parameters (Sequence[google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec]): - Required. The set of parameters to tune. - algorithm (google.cloud.aiplatform_v1beta1.types.StudySpec.Algorithm): - The search algorithm specified for the Study. - observation_noise (google.cloud.aiplatform_v1beta1.types.StudySpec.ObservationNoise): - The observation noise level of the study. - Currently only supported by the Vertex AI Vizier - service. Not supported by HyperparamterTuningJob - or TrainingPipeline. - measurement_selection_type (google.cloud.aiplatform_v1beta1.types.StudySpec.MeasurementSelectionType): - Describe which measurement selection type - will be used - """ - class Algorithm(proto.Enum): - r"""The available search algorithms for the Study.""" - ALGORITHM_UNSPECIFIED = 0 - GRID_SEARCH = 2 - RANDOM_SEARCH = 3 - - class ObservationNoise(proto.Enum): - r"""Describes the noise level of the repeated observations. - "Noisy" means that the repeated observations with the same Trial - parameters may lead to different metric evaluations. - """ - OBSERVATION_NOISE_UNSPECIFIED = 0 - LOW = 1 - HIGH = 2 - - class MeasurementSelectionType(proto.Enum): - r"""This indicates which measurement to use if/when the service - automatically selects the final measurement from previously reported - intermediate measurements. 
Choose this based on two considerations: - A) Do you expect your measurements to monotonically improve? If so, - choose LAST_MEASUREMENT. On the other hand, if you're in a situation - where your system can "over-train" and you expect the performance to - get better for a while but then start declining, choose - BEST_MEASUREMENT. B) Are your measurements significantly noisy - and/or irreproducible? If so, BEST_MEASUREMENT will tend to be - over-optimistic, and it may be better to choose LAST_MEASUREMENT. If - both or neither of (A) and (B) apply, it doesn't matter which - selection type is chosen. - """ - MEASUREMENT_SELECTION_TYPE_UNSPECIFIED = 0 - LAST_MEASUREMENT = 1 - BEST_MEASUREMENT = 2 - - class MetricSpec(proto.Message): - r"""Represents a metric to optimize. - - Attributes: - metric_id (str): - Required. The ID of the metric. Must not - contain whitespaces and must be unique amongst - all MetricSpecs. - goal (google.cloud.aiplatform_v1beta1.types.StudySpec.MetricSpec.GoalType): - Required. The optimization goal of the - metric. - """ - class GoalType(proto.Enum): - r"""The available types of optimization goals.""" - GOAL_TYPE_UNSPECIFIED = 0 - MAXIMIZE = 1 - MINIMIZE = 2 - - metric_id = proto.Field( - proto.STRING, - number=1, - ) - goal = proto.Field( - proto.ENUM, - number=2, - enum='StudySpec.MetricSpec.GoalType', - ) - - class ParameterSpec(proto.Message): - r"""Represents a single parameter to optimize. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - double_value_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.DoubleValueSpec): - The value spec for a 'DOUBLE' parameter. - - This field is a member of `oneof`_ ``parameter_value_spec``. 
- integer_value_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.IntegerValueSpec): - The value spec for an 'INTEGER' parameter. - - This field is a member of `oneof`_ ``parameter_value_spec``. - categorical_value_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.CategoricalValueSpec): - The value spec for a 'CATEGORICAL' parameter. - - This field is a member of `oneof`_ ``parameter_value_spec``. - discrete_value_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.DiscreteValueSpec): - The value spec for a 'DISCRETE' parameter. - - This field is a member of `oneof`_ ``parameter_value_spec``. - parameter_id (str): - Required. The ID of the parameter. Must not - contain whitespaces and must be unique amongst - all ParameterSpecs. - scale_type (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.ScaleType): - How the parameter should be scaled. Leave unset for - ``CATEGORICAL`` parameters. - conditional_parameter_specs (Sequence[google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.ConditionalParameterSpec]): - A conditional parameter node is active if the parameter's - value matches the conditional node's parent_value_condition. - - If two items in conditional_parameter_specs have the same - name, they must have disjoint parent_value_condition. - """ - class ScaleType(proto.Enum): - r"""The type of scaling that should be applied to this parameter.""" - SCALE_TYPE_UNSPECIFIED = 0 - UNIT_LINEAR_SCALE = 1 - UNIT_LOG_SCALE = 2 - UNIT_REVERSE_LOG_SCALE = 3 - - class DoubleValueSpec(proto.Message): - r"""Value specification for a parameter in ``DOUBLE`` type. - - Attributes: - min_value (float): - Required. Inclusive minimum value of the - parameter. - max_value (float): - Required. Inclusive maximum value of the - parameter. - default_value (float): - A default value for a ``DOUBLE`` parameter that is assumed - to be a relatively good starting point. 
Unset value signals - that there is no offered starting point. - - Currently only supported by the Vertex AI Vizier service. - Not supported by HyperparamterTuningJob or TrainingPipeline. - - This field is a member of `oneof`_ ``_default_value``. - """ - - min_value = proto.Field( - proto.DOUBLE, - number=1, - ) - max_value = proto.Field( - proto.DOUBLE, - number=2, - ) - default_value = proto.Field( - proto.DOUBLE, - number=4, - optional=True, - ) - - class IntegerValueSpec(proto.Message): - r"""Value specification for a parameter in ``INTEGER`` type. - - Attributes: - min_value (int): - Required. Inclusive minimum value of the - parameter. - max_value (int): - Required. Inclusive maximum value of the - parameter. - default_value (int): - A default value for an ``INTEGER`` parameter that is assumed - to be a relatively good starting point. Unset value signals - that there is no offered starting point. - - Currently only supported by the Vertex AI Vizier service. - Not supported by HyperparamterTuningJob or TrainingPipeline. - - This field is a member of `oneof`_ ``_default_value``. - """ - - min_value = proto.Field( - proto.INT64, - number=1, - ) - max_value = proto.Field( - proto.INT64, - number=2, - ) - default_value = proto.Field( - proto.INT64, - number=4, - optional=True, - ) - - class CategoricalValueSpec(proto.Message): - r"""Value specification for a parameter in ``CATEGORICAL`` type. - - Attributes: - values (Sequence[str]): - Required. The list of possible categories. - default_value (str): - A default value for a ``CATEGORICAL`` parameter that is - assumed to be a relatively good starting point. Unset value - signals that there is no offered starting point. - - Currently only supported by the Vizier service. Not - supported by HyperparamterTuningJob or TrainingPipeline. - - This field is a member of `oneof`_ ``_default_value``. 
- """ - - values = proto.RepeatedField( - proto.STRING, - number=1, - ) - default_value = proto.Field( - proto.STRING, - number=3, - optional=True, - ) - - class DiscreteValueSpec(proto.Message): - r"""Value specification for a parameter in ``DISCRETE`` type. - - Attributes: - values (Sequence[float]): - Required. A list of possible values. - The list should be in increasing order and at - least 1e-10 apart. For instance, this parameter - might have possible settings of 1.5, 2.5, and - 4.0. This list should not contain more than - 1,000 values. - default_value (float): - A default value for a ``DISCRETE`` parameter that is assumed - to be a relatively good starting point. Unset value signals - that there is no offered starting point. It automatically - rounds to the nearest feasible discrete point. - - Currently only supported by the Vizier service. Not - supported by HyperparamterTuningJob or TrainingPipeline. - - This field is a member of `oneof`_ ``_default_value``. - """ - - values = proto.RepeatedField( - proto.DOUBLE, - number=1, - ) - default_value = proto.Field( - proto.DOUBLE, - number=3, - optional=True, - ) - - class ConditionalParameterSpec(proto.Message): - r"""Represents a parameter spec with condition from its parent - parameter. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - parent_discrete_values (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.ConditionalParameterSpec.DiscreteValueCondition): - The spec for matching values from a parent parameter of - ``DISCRETE`` type. - - This field is a member of `oneof`_ ``parent_value_condition``. 
- parent_int_values (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.ConditionalParameterSpec.IntValueCondition): - The spec for matching values from a parent parameter of - ``INTEGER`` type. - - This field is a member of `oneof`_ ``parent_value_condition``. - parent_categorical_values (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec.ConditionalParameterSpec.CategoricalValueCondition): - The spec for matching values from a parent parameter of - ``CATEGORICAL`` type. - - This field is a member of `oneof`_ ``parent_value_condition``. - parameter_spec (google.cloud.aiplatform_v1beta1.types.StudySpec.ParameterSpec): - Required. The spec for a conditional - parameter. - """ - - class DiscreteValueCondition(proto.Message): - r"""Represents the spec to match discrete values from parent - parameter. - - Attributes: - values (Sequence[float]): - Required. Matches values of the parent parameter of - 'DISCRETE' type. All values must exist in - ``discrete_value_spec`` of parent parameter. - - The Epsilon of the value matching is 1e-10. - """ - - values = proto.RepeatedField( - proto.DOUBLE, - number=1, - ) - - class IntValueCondition(proto.Message): - r"""Represents the spec to match integer values from parent - parameter. - - Attributes: - values (Sequence[int]): - Required. Matches values of the parent parameter of - 'INTEGER' type. All values must lie in - ``integer_value_spec`` of parent parameter. - """ - - values = proto.RepeatedField( - proto.INT64, - number=1, - ) - - class CategoricalValueCondition(proto.Message): - r"""Represents the spec to match categorical values from parent - parameter. - - Attributes: - values (Sequence[str]): - Required. Matches values of the parent parameter of - 'CATEGORICAL' type. All values must exist in - ``categorical_value_spec`` of parent parameter. 
- """ - - values = proto.RepeatedField( - proto.STRING, - number=1, - ) - - parent_discrete_values = proto.Field( - proto.MESSAGE, - number=2, - oneof='parent_value_condition', - message='StudySpec.ParameterSpec.ConditionalParameterSpec.DiscreteValueCondition', - ) - parent_int_values = proto.Field( - proto.MESSAGE, - number=3, - oneof='parent_value_condition', - message='StudySpec.ParameterSpec.ConditionalParameterSpec.IntValueCondition', - ) - parent_categorical_values = proto.Field( - proto.MESSAGE, - number=4, - oneof='parent_value_condition', - message='StudySpec.ParameterSpec.ConditionalParameterSpec.CategoricalValueCondition', - ) - parameter_spec = proto.Field( - proto.MESSAGE, - number=1, - message='StudySpec.ParameterSpec', - ) - - double_value_spec = proto.Field( - proto.MESSAGE, - number=2, - oneof='parameter_value_spec', - message='StudySpec.ParameterSpec.DoubleValueSpec', - ) - integer_value_spec = proto.Field( - proto.MESSAGE, - number=3, - oneof='parameter_value_spec', - message='StudySpec.ParameterSpec.IntegerValueSpec', - ) - categorical_value_spec = proto.Field( - proto.MESSAGE, - number=4, - oneof='parameter_value_spec', - message='StudySpec.ParameterSpec.CategoricalValueSpec', - ) - discrete_value_spec = proto.Field( - proto.MESSAGE, - number=5, - oneof='parameter_value_spec', - message='StudySpec.ParameterSpec.DiscreteValueSpec', - ) - parameter_id = proto.Field( - proto.STRING, - number=1, - ) - scale_type = proto.Field( - proto.ENUM, - number=6, - enum='StudySpec.ParameterSpec.ScaleType', - ) - conditional_parameter_specs = proto.RepeatedField( - proto.MESSAGE, - number=10, - message='StudySpec.ParameterSpec.ConditionalParameterSpec', - ) - - class DecayCurveAutomatedStoppingSpec(proto.Message): - r"""The decay curve automated stopping rule builds a Gaussian - Process Regressor to predict the final objective value of a - Trial based on the already completed Trials and the intermediate - measurements of the current Trial. 
Early stopping is requested - for the current Trial if there is very low probability to exceed - the optimal value found so far. - - Attributes: - use_elapsed_duration (bool): - True if - [Measurement.elapsed_duration][google.cloud.aiplatform.v1beta1.Measurement.elapsed_duration] - is used as the x-axis of each Trials Decay Curve. Otherwise, - [Measurement.step_count][google.cloud.aiplatform.v1beta1.Measurement.step_count] - will be used as the x-axis. - """ - - use_elapsed_duration = proto.Field( - proto.BOOL, - number=1, - ) - - class MedianAutomatedStoppingSpec(proto.Message): - r"""The median automated stopping rule stops a pending Trial if the - Trial's best objective_value is strictly below the median - 'performance' of all completed Trials reported up to the Trial's - last measurement. Currently, 'performance' refers to the running - average of the objective values reported by the Trial in each - measurement. - - Attributes: - use_elapsed_duration (bool): - True if median automated stopping rule applies on - [Measurement.elapsed_duration][google.cloud.aiplatform.v1beta1.Measurement.elapsed_duration]. - It means that elapsed_duration field of latest measurement - of current Trial is used to compute median objective value - for each completed Trials. - """ - - use_elapsed_duration = proto.Field( - proto.BOOL, - number=1, - ) - - class ConvexStopConfig(proto.Message): - r"""Configuration for ConvexStopPolicy. - - Attributes: - max_num_steps (int): - Steps used in predicting the final objective for early - stopped trials. In general, it's set to be the same as the - defined steps in training / tuning. When use_steps is false, - this field is set to the maximum elapsed seconds. - min_num_steps (int): - Minimum number of steps for a trial to complete. Trials - which do not have a measurement with num_steps > - min_num_steps won't be considered for early stopping. It's - ok to set it to 0, and a trial can be early stopped at any - stage. 
By default, min_num_steps is set to be one-tenth of - the max_num_steps. When use_steps is false, this field is - set to the minimum elapsed seconds. - autoregressive_order (int): - The number of Trial measurements used in - autoregressive model for value prediction. A - trial won't be considered early stopping if has - fewer measurement points. - learning_rate_parameter_name (str): - The hyper-parameter name used in the tuning job that stands - for learning rate. Leave it blank if learning rate is not in - a parameter in tuning. The learning_rate is used to estimate - the objective value of the ongoing trial. - use_seconds (bool): - This bool determines whether or not the rule is applied - based on elapsed_secs or steps. If use_seconds==false, the - early stopping decision is made according to the predicted - objective values according to the target steps. If - use_seconds==true, elapsed_secs is used instead of steps. - Also, in this case, the parameters max_num_steps and - min_num_steps are overloaded to contain max_elapsed_seconds - and min_elapsed_seconds. 
- """ - - max_num_steps = proto.Field( - proto.INT64, - number=1, - ) - min_num_steps = proto.Field( - proto.INT64, - number=2, - ) - autoregressive_order = proto.Field( - proto.INT64, - number=3, - ) - learning_rate_parameter_name = proto.Field( - proto.STRING, - number=4, - ) - use_seconds = proto.Field( - proto.BOOL, - number=5, - ) - - decay_curve_stopping_spec = proto.Field( - proto.MESSAGE, - number=4, - oneof='automated_stopping_spec', - message=DecayCurveAutomatedStoppingSpec, - ) - median_automated_stopping_spec = proto.Field( - proto.MESSAGE, - number=5, - oneof='automated_stopping_spec', - message=MedianAutomatedStoppingSpec, - ) - convex_stop_config = proto.Field( - proto.MESSAGE, - number=8, - oneof='automated_stopping_spec', - message=ConvexStopConfig, - ) - metrics = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=MetricSpec, - ) - parameters = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=ParameterSpec, - ) - algorithm = proto.Field( - proto.ENUM, - number=3, - enum=Algorithm, - ) - observation_noise = proto.Field( - proto.ENUM, - number=6, - enum=ObservationNoise, - ) - measurement_selection_type = proto.Field( - proto.ENUM, - number=7, - enum=MeasurementSelectionType, - ) - - -class Measurement(proto.Message): - r"""A message representing a Measurement of a Trial. A - Measurement contains the Metrics got by executing a Trial using - suggested hyperparameter values. - - Attributes: - elapsed_duration (google.protobuf.duration_pb2.Duration): - Output only. Time that the Trial has been - running at the point of this Measurement. - step_count (int): - Output only. The number of steps the machine - learning model has been trained for. Must be - non-negative. - metrics (Sequence[google.cloud.aiplatform_v1beta1.types.Measurement.Metric]): - Output only. A list of metrics got by - evaluating the objective functions using - suggested Parameter values. 
- """ - - class Metric(proto.Message): - r"""A message representing a metric in the measurement. - - Attributes: - metric_id (str): - Output only. The ID of the Metric. The Metric should be - defined in [StudySpec's - Metrics][google.cloud.aiplatform.v1beta1.StudySpec.metrics]. - value (float): - Output only. The value for this metric. - """ - - metric_id = proto.Field( - proto.STRING, - number=1, - ) - value = proto.Field( - proto.DOUBLE, - number=2, - ) - - elapsed_duration = proto.Field( - proto.MESSAGE, - number=1, - message=duration_pb2.Duration, - ) - step_count = proto.Field( - proto.INT64, - number=2, - ) - metrics = proto.RepeatedField( - proto.MESSAGE, - number=3, - message=Metric, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard.py deleted file mode 100644 index a984eb652d..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard.py +++ /dev/null @@ -1,131 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'Tensorboard', - }, -) - - -class Tensorboard(proto.Message): - r"""Tensorboard is a physical database that stores users' - training metrics. A default Tensorboard is provided in each - region of a GCP project. If needed users can also create extra - Tensorboards in their projects. - - Attributes: - name (str): - Output only. Name of the Tensorboard. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - display_name (str): - Required. User provided name of this - Tensorboard. - description (str): - Description of this Tensorboard. - encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): - Customer-managed encryption key spec for a - Tensorboard. If set, this Tensorboard and all - sub-resources of this Tensorboard will be - secured by this key. - blob_storage_path_prefix (str): - Output only. Consumer project Cloud Storage - path prefix used to store blob data, which can - either be a bucket or directory. Does not end - with a '/'. - run_count (int): - Output only. The number of Runs stored in - this Tensorboard. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Tensorboard - was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this Tensorboard - was last updated. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.Tensorboard.LabelsEntry]): - The labels with user-defined metadata to - organize your Tensorboards. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. 
No more than 64 user labels can be - associated with one Tensorboard (System labels - are excluded). - - See https://goo.gl/xmQnxf for more information - and examples of labels. System reserved label - keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. - etag (str): - Used to perform a consistent read-modify- - rite updates. If not set, a blind "overwrite" - update happens. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=3, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=11, - message=gca_encryption_spec.EncryptionSpec, - ) - blob_storage_path_prefix = proto.Field( - proto.STRING, - number=10, - ) - run_count = proto.Field( - proto.INT32, - number=5, - ) - create_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=8, - ) - etag = proto.Field( - proto.STRING, - number=9, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_data.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_data.py deleted file mode 100644 index 89370a17e3..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_data.py +++ /dev/null @@ -1,205 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'TimeSeriesData', - 'TimeSeriesDataPoint', - 'Scalar', - 'TensorboardTensor', - 'TensorboardBlobSequence', - 'TensorboardBlob', - }, -) - - -class TimeSeriesData(proto.Message): - r"""All the data stored in a TensorboardTimeSeries. - - Attributes: - tensorboard_time_series_id (str): - Required. The ID of the - TensorboardTimeSeries, which will become the - final component of the TensorboardTimeSeries' - resource name - value_type (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries.ValueType): - Required. Immutable. The value type of this - time series. All the values in this time series - data must match this value type. - values (Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesDataPoint]): - Required. Data points in this time series. - """ - - tensorboard_time_series_id = proto.Field( - proto.STRING, - number=1, - ) - value_type = proto.Field( - proto.ENUM, - number=2, - enum=tensorboard_time_series.TensorboardTimeSeries.ValueType, - ) - values = proto.RepeatedField( - proto.MESSAGE, - number=3, - message='TimeSeriesDataPoint', - ) - - -class TimeSeriesDataPoint(proto.Message): - r"""A TensorboardTimeSeries data point. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. 
- Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - scalar (google.cloud.aiplatform_v1beta1.types.Scalar): - A scalar value. - - This field is a member of `oneof`_ ``value``. - tensor (google.cloud.aiplatform_v1beta1.types.TensorboardTensor): - A tensor value. - - This field is a member of `oneof`_ ``value``. - blobs (google.cloud.aiplatform_v1beta1.types.TensorboardBlobSequence): - A blob sequence value. - - This field is a member of `oneof`_ ``value``. - wall_time (google.protobuf.timestamp_pb2.Timestamp): - Wall clock timestamp when this data point is - generated by the end user. - step (int): - Step index of this data point within the run. - """ - - scalar = proto.Field( - proto.MESSAGE, - number=3, - oneof='value', - message='Scalar', - ) - tensor = proto.Field( - proto.MESSAGE, - number=4, - oneof='value', - message='TensorboardTensor', - ) - blobs = proto.Field( - proto.MESSAGE, - number=5, - oneof='value', - message='TensorboardBlobSequence', - ) - wall_time = proto.Field( - proto.MESSAGE, - number=1, - message=timestamp_pb2.Timestamp, - ) - step = proto.Field( - proto.INT64, - number=2, - ) - - -class Scalar(proto.Message): - r"""One point viewable on a scalar metric plot. - - Attributes: - value (float): - Value of the point at this step / timestamp. - """ - - value = proto.Field( - proto.DOUBLE, - number=1, - ) - - -class TensorboardTensor(proto.Message): - r"""One point viewable on a tensor metric plot. - - Attributes: - value (bytes): - Required. Serialized form of - https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/tensor.proto - version_number (int): - Optional. Version number of TensorProto used to serialize - [value][google.cloud.aiplatform.v1beta1.TensorboardTensor.value]. 
- """ - - value = proto.Field( - proto.BYTES, - number=1, - ) - version_number = proto.Field( - proto.INT32, - number=2, - ) - - -class TensorboardBlobSequence(proto.Message): - r"""One point viewable on a blob metric plot, but mostly just a wrapper - message to work around repeated fields can't be used directly within - ``oneof`` fields. - - Attributes: - values (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardBlob]): - List of blobs contained within the sequence. - """ - - values = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='TensorboardBlob', - ) - - -class TensorboardBlob(proto.Message): - r"""One blob (e.g, image, graph) viewable on a blob metric plot. - - Attributes: - id (str): - Output only. A URI safe key uniquely - identifying a blob. Can be used to locate the - blob stored in the Cloud Storage bucket of the - consumer project. - data (bytes): - Optional. The bytes of the blob is not - present unless it's returned by the - ReadTensorboardBlobData endpoint. - """ - - id = proto.Field( - proto.STRING, - number=1, - ) - data = proto.Field( - proto.BYTES, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py deleted file mode 100644 index 498bb15565..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_experiment.py +++ /dev/null @@ -1,115 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'TensorboardExperiment', - }, -) - - -class TensorboardExperiment(proto.Message): - r"""A TensorboardExperiment is a group of TensorboardRuns, that - are typically the results of a training job run, in a - Tensorboard. - - Attributes: - name (str): - Output only. Name of the TensorboardExperiment. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - display_name (str): - User provided name of this - TensorboardExperiment. - description (str): - Description of this TensorboardExperiment. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - TensorboardExperiment was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - TensorboardExperiment was last updated. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardExperiment.LabelsEntry]): - The labels with user-defined metadata to organize your - Datasets. - - Label keys and values can be no longer than 64 characters - (Unicode codepoints), can only contain lowercase letters, - numeric characters, underscores and dashes. International - characters are allowed. No more than 64 user labels can be - associated with one Dataset (System labels are excluded). - - See https://goo.gl/xmQnxf for more information and examples - of labels. 
System reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. Following - system labels exist for each Dataset: - - - "aiplatform.googleapis.com/dataset_metadata_schema": - - - output only, its value is the - [metadata_schema's][metadata_schema_uri] title. - etag (str): - Used to perform consistent read-modify-write - updates. If not set, a blind "overwrite" update - happens. - source (str): - Immutable. Source of the - TensorboardExperiment. Example: a custom - training job. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=3, - ) - create_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=6, - ) - etag = proto.Field( - proto.STRING, - number=7, - ) - source = proto.Field( - proto.STRING, - number=8, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_run.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_run.py deleted file mode 100644 index 48d7ab8223..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_run.py +++ /dev/null @@ -1,112 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'TensorboardRun', - }, -) - - -class TensorboardRun(proto.Message): - r"""TensorboardRun maps to a specific execution of a training job - with a given set of hyperparameter values, model definition, - dataset, etc - - Attributes: - name (str): - Output only. Name of the TensorboardRun. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - display_name (str): - Required. User provided name of this - TensorboardRun. This value must be unique among - all TensorboardRuns belonging to the same parent - TensorboardExperiment. - description (str): - Description of this TensorboardRun. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - TensorboardRun was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - TensorboardRun was last updated. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardRun.LabelsEntry]): - The labels with user-defined metadata to organize your - TensorboardRuns. - - This field will be used to filter and visualize Runs in the - Tensorboard UI. For example, a Vertex AI training job can - set a label aiplatform.googleapis.com/training_job_id=xxxxx - to all the runs created within that job. An end user can set - a label experiment_id=xxxxx for all the runs produced in a - Jupyter notebook. These runs can be grouped by a label value - and visualized together in the Tensorboard UI. - - Label keys and values can be no longer than 64 characters - (Unicode codepoints), can only contain lowercase letters, - numeric characters, underscores and dashes. International - characters are allowed. 
No more than 64 user labels can be - associated with one TensorboardRun (System labels are - excluded). - - See https://goo.gl/xmQnxf for more information and examples - of labels. System reserved label keys are prefixed with - "aiplatform.googleapis.com/" and are immutable. - etag (str): - Used to perform a consistent read-modify- - rite updates. If not set, a blind "overwrite" - update happens. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=3, - ) - create_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=8, - ) - etag = proto.Field( - proto.STRING, - number=9, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py deleted file mode 100644 index c1561a51ed..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_service.py +++ /dev/null @@ -1,1224 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import operation -from google.cloud.aiplatform_v1beta1.types import tensorboard as gca_tensorboard -from google.cloud.aiplatform_v1beta1.types import tensorboard_data -from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment -from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run -from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series as gca_tensorboard_time_series -from google.protobuf import field_mask_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'CreateTensorboardRequest', - 'GetTensorboardRequest', - 'ListTensorboardsRequest', - 'ListTensorboardsResponse', - 'UpdateTensorboardRequest', - 'DeleteTensorboardRequest', - 'CreateTensorboardExperimentRequest', - 'GetTensorboardExperimentRequest', - 'ListTensorboardExperimentsRequest', - 'ListTensorboardExperimentsResponse', - 'UpdateTensorboardExperimentRequest', - 'DeleteTensorboardExperimentRequest', - 'BatchCreateTensorboardRunsRequest', - 'BatchCreateTensorboardRunsResponse', - 'CreateTensorboardRunRequest', - 'GetTensorboardRunRequest', - 'ReadTensorboardBlobDataRequest', - 'ReadTensorboardBlobDataResponse', - 'ListTensorboardRunsRequest', - 'ListTensorboardRunsResponse', - 'UpdateTensorboardRunRequest', - 'DeleteTensorboardRunRequest', - 'BatchCreateTensorboardTimeSeriesRequest', - 'BatchCreateTensorboardTimeSeriesResponse', - 'CreateTensorboardTimeSeriesRequest', - 'GetTensorboardTimeSeriesRequest', - 'ListTensorboardTimeSeriesRequest', - 'ListTensorboardTimeSeriesResponse', - 'UpdateTensorboardTimeSeriesRequest', - 'DeleteTensorboardTimeSeriesRequest', - 'BatchReadTensorboardTimeSeriesDataRequest', - 'BatchReadTensorboardTimeSeriesDataResponse', - 'ReadTensorboardTimeSeriesDataRequest', - 'ReadTensorboardTimeSeriesDataResponse', - 
'WriteTensorboardExperimentDataRequest', - 'WriteTensorboardExperimentDataResponse', - 'WriteTensorboardRunDataRequest', - 'WriteTensorboardRunDataResponse', - 'ExportTensorboardTimeSeriesDataRequest', - 'ExportTensorboardTimeSeriesDataResponse', - 'CreateTensorboardOperationMetadata', - 'UpdateTensorboardOperationMetadata', - }, -) - - -class CreateTensorboardRequest(proto.Message): - r"""Request message for - [TensorboardService.CreateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboard]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - Tensorboard in. Format: - ``projects/{project}/locations/{location}`` - tensorboard (google.cloud.aiplatform_v1beta1.types.Tensorboard): - Required. The Tensorboard to create. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - tensorboard = proto.Field( - proto.MESSAGE, - number=2, - message=gca_tensorboard.Tensorboard, - ) - - -class GetTensorboardRequest(proto.Message): - r"""Request message for - [TensorboardService.GetTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboard]. - - Attributes: - name (str): - Required. The name of the Tensorboard resource. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListTensorboardsRequest(proto.Message): - r"""Request message for - [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. - - Attributes: - parent (str): - Required. The resource name of the Location to list - Tensorboards. Format: - ``projects/{project}/locations/{location}`` - filter (str): - Lists the Tensorboards that match the filter - expression. - page_size (int): - The maximum number of Tensorboards to return. - The service may return fewer than this value. If - unspecified, at most 100 Tensorboards will be - returned. 
The maximum value is 100; values above - 100 will be coerced to 100. - page_token (str): - A page token, received from a previous - [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards] - call. Provide this to retrieve the subsequent page. - - When paginating, all other parameters provided to - [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards] - must match the call that provided the page token. - order_by (str): - Field to use to sort the list. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - order_by = proto.Field( - proto.STRING, - number=5, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=6, - message=field_mask_pb2.FieldMask, - ) - - -class ListTensorboardsResponse(proto.Message): - r"""Response message for - [TensorboardService.ListTensorboards][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards]. - - Attributes: - tensorboards (Sequence[google.cloud.aiplatform_v1beta1.types.Tensorboard]): - The Tensorboards mathching the request. - next_page_token (str): - A token, which can be sent as - [ListTensorboardsRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardsRequest.page_token] - to retrieve the next page. If this field is omitted, there - are no subsequent pages. 
- """ - - @property - def raw_page(self): - return self - - tensorboards = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_tensorboard.Tensorboard, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateTensorboardRequest(proto.Message): - r"""Request message for - [TensorboardService.UpdateTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboard]. - - Attributes: - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. Field mask is used to specify the fields to be - overwritten in the Tensorboard resource by the update. The - fields specified in the update_mask are relative to the - resource, not the full request. A field will be overwritten - if it is in the mask. If the user does not provide a mask - then all fields will be overwritten if new values are - specified. - tensorboard (google.cloud.aiplatform_v1beta1.types.Tensorboard): - Required. The Tensorboard's ``name`` field is used to - identify the Tensorboard to be updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - """ - - update_mask = proto.Field( - proto.MESSAGE, - number=1, - message=field_mask_pb2.FieldMask, - ) - tensorboard = proto.Field( - proto.MESSAGE, - number=2, - message=gca_tensorboard.Tensorboard, - ) - - -class DeleteTensorboardRequest(proto.Message): - r"""Request message for - [TensorboardService.DeleteTensorboard][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboard]. - - Attributes: - name (str): - Required. The name of the Tensorboard to be deleted. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateTensorboardExperimentRequest(proto.Message): - r"""Request message for - [TensorboardService.CreateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardExperiment]. 
- - Attributes: - parent (str): - Required. The resource name of the Tensorboard to create the - TensorboardExperiment in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` - tensorboard_experiment (google.cloud.aiplatform_v1beta1.types.TensorboardExperiment): - The TensorboardExperiment to create. - tensorboard_experiment_id (str): - Required. The ID to use for the Tensorboard experiment, - which will become the final component of the Tensorboard - experiment's resource name. - - This value should be 1-128 characters, and valid characters - are /[a-z][0-9]-/. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - tensorboard_experiment = proto.Field( - proto.MESSAGE, - number=2, - message=gca_tensorboard_experiment.TensorboardExperiment, - ) - tensorboard_experiment_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetTensorboardExperimentRequest(proto.Message): - r"""Request message for - [TensorboardService.GetTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardExperiment]. - - Attributes: - name (str): - Required. The name of the TensorboardExperiment resource. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListTensorboardExperimentsRequest(proto.Message): - r"""Request message for - [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. - - Attributes: - parent (str): - Required. The resource name of the - Tensorboard to list TensorboardExperiments. - Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}' - filter (str): - Lists the TensorboardExperiments that match - the filter expression. - page_size (int): - The maximum number of TensorboardExperiments - to return. The service may return fewer than - this value. 
If unspecified, at most 50 - TensorboardExperiments will be returned. The - maximum value is 1000; values above 1000 will be - coerced to 1000. - page_token (str): - A page token, received from a previous - [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments] - call. Provide this to retrieve the subsequent page. - - When paginating, all other parameters provided to - [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments] - must match the call that provided the page token. - order_by (str): - Field to use to sort the list. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - order_by = proto.Field( - proto.STRING, - number=5, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=6, - message=field_mask_pb2.FieldMask, - ) - - -class ListTensorboardExperimentsResponse(proto.Message): - r"""Response message for - [TensorboardService.ListTensorboardExperiments][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments]. - - Attributes: - tensorboard_experiments (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardExperiment]): - The TensorboardExperiments mathching the - request. - next_page_token (str): - A token, which can be sent as - [ListTensorboardExperimentsRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardExperimentsRequest.page_token] - to retrieve the next page. If this field is omitted, there - are no subsequent pages. 
- """ - - @property - def raw_page(self): - return self - - tensorboard_experiments = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_tensorboard_experiment.TensorboardExperiment, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateTensorboardExperimentRequest(proto.Message): - r"""Request message for - [TensorboardService.UpdateTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardExperiment]. - - Attributes: - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. Field mask is used to specify the fields to be - overwritten in the TensorboardExperiment resource by the - update. The fields specified in the update_mask are relative - to the resource, not the full request. A field will be - overwritten if it is in the mask. If the user does not - provide a mask then all fields will be overwritten if new - values are specified. - tensorboard_experiment (google.cloud.aiplatform_v1beta1.types.TensorboardExperiment): - Required. The TensorboardExperiment's ``name`` field is used - to identify the TensorboardExperiment to be updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - """ - - update_mask = proto.Field( - proto.MESSAGE, - number=1, - message=field_mask_pb2.FieldMask, - ) - tensorboard_experiment = proto.Field( - proto.MESSAGE, - number=2, - message=gca_tensorboard_experiment.TensorboardExperiment, - ) - - -class DeleteTensorboardExperimentRequest(proto.Message): - r"""Request message for - [TensorboardService.DeleteTensorboardExperiment][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardExperiment]. - - Attributes: - name (str): - Required. The name of the TensorboardExperiment to be - deleted. 
Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class BatchCreateTensorboardRunsRequest(proto.Message): - r"""Request message for - [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns]. - - Attributes: - parent (str): - Required. The resource name of the TensorboardExperiment to - create the TensorboardRuns in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - The parent field in the CreateTensorboardRunRequest messages - must match this field. - requests (Sequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest]): - Required. The request message specifying the - TensorboardRuns to create. A maximum of 1000 - TensorboardRuns can be created in a batch. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - requests = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='CreateTensorboardRunRequest', - ) - - -class BatchCreateTensorboardRunsResponse(proto.Message): - r"""Response message for - [TensorboardService.BatchCreateTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns]. - - Attributes: - tensorboard_runs (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardRun]): - The created TensorboardRuns. - """ - - tensorboard_runs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_tensorboard_run.TensorboardRun, - ) - - -class CreateTensorboardRunRequest(proto.Message): - r"""Request message for - [TensorboardService.CreateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun]. - - Attributes: - parent (str): - Required. The resource name of the TensorboardExperiment to - create the TensorboardRun in. 
Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - tensorboard_run (google.cloud.aiplatform_v1beta1.types.TensorboardRun): - Required. The TensorboardRun to create. - tensorboard_run_id (str): - Required. The ID to use for the Tensorboard run, which will - become the final component of the Tensorboard run's resource - name. - - This value should be 1-128 characters, and valid characters - are /[a-z][0-9]-/. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - tensorboard_run = proto.Field( - proto.MESSAGE, - number=2, - message=gca_tensorboard_run.TensorboardRun, - ) - tensorboard_run_id = proto.Field( - proto.STRING, - number=3, - ) - - -class GetTensorboardRunRequest(proto.Message): - r"""Request message for - [TensorboardService.GetTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardRun]. - - Attributes: - name (str): - Required. The name of the TensorboardRun resource. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ReadTensorboardBlobDataRequest(proto.Message): - r"""Request message for - [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData]. - - Attributes: - time_series (str): - Required. The resource name of the TensorboardTimeSeries to - list Blobs. Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}' - blob_ids (Sequence[str]): - IDs of the blobs to read. 
- """ - - time_series = proto.Field( - proto.STRING, - number=1, - ) - blob_ids = proto.RepeatedField( - proto.STRING, - number=2, - ) - - -class ReadTensorboardBlobDataResponse(proto.Message): - r"""Response message for - [TensorboardService.ReadTensorboardBlobData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData]. - - Attributes: - blobs (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardBlob]): - Blob messages containing blob bytes. - """ - - blobs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=tensorboard_data.TensorboardBlob, - ) - - -class ListTensorboardRunsRequest(proto.Message): - r"""Request message for - [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]. - - Attributes: - parent (str): - Required. The resource name of the - TensorboardExperiment to list TensorboardRuns. - Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}' - filter (str): - Lists the TensorboardRuns that match the - filter expression. - page_size (int): - The maximum number of TensorboardRuns to - return. The service may return fewer than this - value. If unspecified, at most 50 - TensorboardRuns will be returned. The maximum - value is 1000; values above 1000 will be coerced - to 1000. - page_token (str): - A page token, received from a previous - [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns] - call. Provide this to retrieve the subsequent page. - - When paginating, all other parameters provided to - [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns] - must match the call that provided the page token. - order_by (str): - Field to use to sort the list. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - order_by = proto.Field( - proto.STRING, - number=5, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=6, - message=field_mask_pb2.FieldMask, - ) - - -class ListTensorboardRunsResponse(proto.Message): - r"""Response message for - [TensorboardService.ListTensorboardRuns][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns]. - - Attributes: - tensorboard_runs (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardRun]): - The TensorboardRuns mathching the request. - next_page_token (str): - A token, which can be sent as - [ListTensorboardRunsRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardRunsRequest.page_token] - to retrieve the next page. If this field is omitted, there - are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - tensorboard_runs = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_tensorboard_run.TensorboardRun, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateTensorboardRunRequest(proto.Message): - r"""Request message for - [TensorboardService.UpdateTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardRun]. - - Attributes: - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. Field mask is used to specify the fields to be - overwritten in the TensorboardRun resource by the update. - The fields specified in the update_mask are relative to the - resource, not the full request. A field will be overwritten - if it is in the mask. If the user does not provide a mask - then all fields will be overwritten if new values are - specified. - tensorboard_run (google.cloud.aiplatform_v1beta1.types.TensorboardRun): - Required. 
The TensorboardRun's ``name`` field is used to - identify the TensorboardRun to be updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - """ - - update_mask = proto.Field( - proto.MESSAGE, - number=1, - message=field_mask_pb2.FieldMask, - ) - tensorboard_run = proto.Field( - proto.MESSAGE, - number=2, - message=gca_tensorboard_run.TensorboardRun, - ) - - -class DeleteTensorboardRunRequest(proto.Message): - r"""Request message for - [TensorboardService.DeleteTensorboardRun][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardRun]. - - Attributes: - name (str): - Required. The name of the TensorboardRun to be deleted. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class BatchCreateTensorboardTimeSeriesRequest(proto.Message): - r"""Request message for - [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries]. - - Attributes: - parent (str): - Required. The resource name of the TensorboardExperiment to - create the TensorboardTimeSeries in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - The TensorboardRuns referenced by the parent fields in the - CreateTensorboardTimeSeriesRequest messages must be sub - resources of this TensorboardExperiment. - requests (Sequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest]): - Required. The request message specifying the - TensorboardTimeSeries to create. A maximum of - 1000 TensorboardTimeSeries can be created in a - batch. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - requests = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='CreateTensorboardTimeSeriesRequest', - ) - - -class BatchCreateTensorboardTimeSeriesResponse(proto.Message): - r"""Response message for - [TensorboardService.BatchCreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries]. - - Attributes: - tensorboard_time_series (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries]): - The created TensorboardTimeSeries. - """ - - tensorboard_time_series = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_tensorboard_time_series.TensorboardTimeSeries, - ) - - -class CreateTensorboardTimeSeriesRequest(proto.Message): - r"""Request message for - [TensorboardService.CreateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardTimeSeries]. - - Attributes: - parent (str): - Required. The resource name of the TensorboardRun to create - the TensorboardTimeSeries in. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - tensorboard_time_series_id (str): - Optional. The user specified unique ID to use for the - TensorboardTimeSeries, which will become the final component - of the TensorboardTimeSeries's resource name. This value - should match "[a-z0-9][a-z0-9-]{0, 127}". - tensorboard_time_series (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries): - Required. The TensorboardTimeSeries to - create. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - tensorboard_time_series_id = proto.Field( - proto.STRING, - number=3, - ) - tensorboard_time_series = proto.Field( - proto.MESSAGE, - number=2, - message=gca_tensorboard_time_series.TensorboardTimeSeries, - ) - - -class GetTensorboardTimeSeriesRequest(proto.Message): - r"""Request message for - [TensorboardService.GetTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardTimeSeries]. - - Attributes: - name (str): - Required. The name of the TensorboardTimeSeries resource. - Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListTensorboardTimeSeriesRequest(proto.Message): - r"""Request message for - [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]. - - Attributes: - parent (str): - Required. The resource name of the - TensorboardRun to list TensorboardTimeSeries. - Format: - 'projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}' - filter (str): - Lists the TensorboardTimeSeries that match - the filter expression. - page_size (int): - The maximum number of TensorboardTimeSeries - to return. The service may return fewer than - this value. If unspecified, at most 50 - TensorboardTimeSeries will be returned. The - maximum value is 1000; values above 1000 will be - coerced to 1000. - page_token (str): - A page token, received from a previous - [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries] - call. Provide this to retrieve the subsequent page. 
- - When paginating, all other parameters provided to - [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries] - must match the call that provided the page token. - order_by (str): - Field to use to sort the list. - read_mask (google.protobuf.field_mask_pb2.FieldMask): - Mask specifying which fields to read. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - order_by = proto.Field( - proto.STRING, - number=5, - ) - read_mask = proto.Field( - proto.MESSAGE, - number=6, - message=field_mask_pb2.FieldMask, - ) - - -class ListTensorboardTimeSeriesResponse(proto.Message): - r"""Response message for - [TensorboardService.ListTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries]. - - Attributes: - tensorboard_time_series (Sequence[google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries]): - The TensorboardTimeSeries mathching the - request. - next_page_token (str): - A token, which can be sent as - [ListTensorboardTimeSeriesRequest.page_token][google.cloud.aiplatform.v1beta1.ListTensorboardTimeSeriesRequest.page_token] - to retrieve the next page. If this field is omitted, there - are no subsequent pages. - """ - - @property - def raw_page(self): - return self - - tensorboard_time_series = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_tensorboard_time_series.TensorboardTimeSeries, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UpdateTensorboardTimeSeriesRequest(proto.Message): - r"""Request message for - [TensorboardService.UpdateTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardTimeSeries]. - - Attributes: - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. 
Field mask is used to specify the fields to be - overwritten in the TensorboardTimeSeries resource by the - update. The fields specified in the update_mask are relative - to the resource, not the full request. A field will be - overwritten if it is in the mask. If the user does not - provide a mask then all fields will be overwritten if new - values are specified. - tensorboard_time_series (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries): - Required. The TensorboardTimeSeries' ``name`` field is used - to identify the TensorboardTimeSeries to be updated. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - """ - - update_mask = proto.Field( - proto.MESSAGE, - number=1, - message=field_mask_pb2.FieldMask, - ) - tensorboard_time_series = proto.Field( - proto.MESSAGE, - number=2, - message=gca_tensorboard_time_series.TensorboardTimeSeries, - ) - - -class DeleteTensorboardTimeSeriesRequest(proto.Message): - r"""Request message for - [TensorboardService.DeleteTensorboardTimeSeries][google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardTimeSeries]. - - Attributes: - name (str): - Required. The name of the TensorboardTimeSeries to be - deleted. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class BatchReadTensorboardTimeSeriesDataRequest(proto.Message): - r"""Request message for - [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.BatchReadTensorboardTimeSeriesData]. - - Attributes: - tensorboard (str): - Required. The resource name of the Tensorboard containing - TensorboardTimeSeries to read data from. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}``. 
- The TensorboardTimeSeries referenced by - [time_series][google.cloud.aiplatform.v1beta1.BatchReadTensorboardTimeSeriesDataRequest.time_series] - must be sub resources of this Tensorboard. - time_series (Sequence[str]): - Required. The resource names of the TensorboardTimeSeries to - read data from. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - """ - - tensorboard = proto.Field( - proto.STRING, - number=1, - ) - time_series = proto.RepeatedField( - proto.STRING, - number=2, - ) - - -class BatchReadTensorboardTimeSeriesDataResponse(proto.Message): - r"""Response message for - [TensorboardService.BatchReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.BatchReadTensorboardTimeSeriesData]. - - Attributes: - time_series_data (Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesData]): - The returned time series data. - """ - - time_series_data = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=tensorboard_data.TimeSeriesData, - ) - - -class ReadTensorboardTimeSeriesDataRequest(proto.Message): - r"""Request message for - [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. - - Attributes: - tensorboard_time_series (str): - Required. The resource name of the TensorboardTimeSeries to - read data from. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - max_data_points (int): - The maximum number of TensorboardTimeSeries' - data to return. - This value should be a positive integer. - This value can be set to -1 to return all data. - filter (str): - Reads the TensorboardTimeSeries' data that - match the filter expression. 
- """ - - tensorboard_time_series = proto.Field( - proto.STRING, - number=1, - ) - max_data_points = proto.Field( - proto.INT32, - number=2, - ) - filter = proto.Field( - proto.STRING, - number=3, - ) - - -class ReadTensorboardTimeSeriesDataResponse(proto.Message): - r"""Response message for - [TensorboardService.ReadTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData]. - - Attributes: - time_series_data (google.cloud.aiplatform_v1beta1.types.TimeSeriesData): - The returned time series data. - """ - - time_series_data = proto.Field( - proto.MESSAGE, - number=1, - message=tensorboard_data.TimeSeriesData, - ) - - -class WriteTensorboardExperimentDataRequest(proto.Message): - r"""Request message for - [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData]. - - Attributes: - tensorboard_experiment (str): - Required. The resource name of the TensorboardExperiment to - write data to. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}`` - write_run_data_requests (Sequence[google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest]): - Required. Requests containing per-run - TensorboardTimeSeries data to write. - """ - - tensorboard_experiment = proto.Field( - proto.STRING, - number=1, - ) - write_run_data_requests = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='WriteTensorboardRunDataRequest', - ) - - -class WriteTensorboardExperimentDataResponse(proto.Message): - r"""Response message for - [TensorboardService.WriteTensorboardExperimentData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData]. - - """ - - -class WriteTensorboardRunDataRequest(proto.Message): - r"""Request message for - [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData]. 
- - Attributes: - tensorboard_run (str): - Required. The resource name of the TensorboardRun to write - data to. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}`` - time_series_data (Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesData]): - Required. The TensorboardTimeSeries data to - write. Values with in a time series are indexed - by their step value. Repeated writes to the same - step will overwrite the existing value for that - step. - The upper limit of data points per write request - is 5000. - """ - - tensorboard_run = proto.Field( - proto.STRING, - number=1, - ) - time_series_data = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=tensorboard_data.TimeSeriesData, - ) - - -class WriteTensorboardRunDataResponse(proto.Message): - r"""Response message for - [TensorboardService.WriteTensorboardRunData][google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData]. - - """ - - -class ExportTensorboardTimeSeriesDataRequest(proto.Message): - r"""Request message for - [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. - - Attributes: - tensorboard_time_series (str): - Required. The resource name of the TensorboardTimeSeries to - export data from. Format: - ``projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}`` - filter (str): - Exports the TensorboardTimeSeries' data that - match the filter expression. - page_size (int): - The maximum number of data points to return per page. The - default page_size will be 1000. Values must be between 1 and - 10000. Values above 10000 will be coerced to 10000. - page_token (str): - A page token, received from a previous - [TensorboardService.ExportTensorboardTimeSeries][] call. - Provide this to retrieve the subsequent page. 
- - When paginating, all other parameters provided to - [TensorboardService.ExportTensorboardTimeSeries][] must - match the call that provided the page token. - order_by (str): - Field to use to sort the - TensorboardTimeSeries' data. By default, - TensorboardTimeSeries' data will be returned in - a pseudo random order. - """ - - tensorboard_time_series = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - order_by = proto.Field( - proto.STRING, - number=5, - ) - - -class ExportTensorboardTimeSeriesDataResponse(proto.Message): - r"""Response message for - [TensorboardService.ExportTensorboardTimeSeriesData][google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData]. - - Attributes: - time_series_data_points (Sequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesDataPoint]): - The returned time series data points. - next_page_token (str): - A token, which can be sent as - [ExportTensorboardTimeSeriesRequest.page_token][] to - retrieve the next page. If this field is omitted, there are - no subsequent pages. - """ - - @property - def raw_page(self): - return self - - time_series_data_points = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=tensorboard_data.TimeSeriesDataPoint, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class CreateTensorboardOperationMetadata(proto.Message): - r"""Details of operations that perform create Tensorboard. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for Tensorboard. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -class UpdateTensorboardOperationMetadata(proto.Message): - r"""Details of operations that perform update Tensorboard. 
- - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for Tensorboard. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_time_series.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_time_series.py deleted file mode 100644 index 94c18c08cb..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/tensorboard_time_series.py +++ /dev/null @@ -1,153 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'TensorboardTimeSeries', - }, -) - - -class TensorboardTimeSeries(proto.Message): - r"""TensorboardTimeSeries maps to times series produced in - training runs - - Attributes: - name (str): - Output only. Name of the - TensorboardTimeSeries. - display_name (str): - Required. User provided name of this - TensorboardTimeSeries. This value should be - unique among all TensorboardTimeSeries resources - belonging to the same TensorboardRun resource - (parent resource). 
- description (str): - Description of this TensorboardTimeSeries. - value_type (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries.ValueType): - Required. Immutable. Type of - TensorboardTimeSeries value. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - TensorboardTimeSeries was created. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Timestamp when this - TensorboardTimeSeries was last updated. - etag (str): - Used to perform a consistent read-modify- - rite updates. If not set, a blind "overwrite" - update happens. - plugin_name (str): - Immutable. Name of the plugin this time - series pertain to. Such as Scalar, Tensor, Blob - plugin_data (bytes): - Data of the current plugin, with the size - limited to 65KB. - metadata (google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries.Metadata): - Output only. Scalar, Tensor, or Blob metadata - for this TensorboardTimeSeries. - """ - class ValueType(proto.Enum): - r"""An enum representing the value type of a - TensorboardTimeSeries. - """ - VALUE_TYPE_UNSPECIFIED = 0 - SCALAR = 1 - TENSOR = 2 - BLOB_SEQUENCE = 3 - - class Metadata(proto.Message): - r"""Describes metadata for a TensorboardTimeSeries. - - Attributes: - max_step (int): - Output only. Max step index of all data - points within a TensorboardTimeSeries. - max_wall_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Max wall clock timestamp of all - data points within a TensorboardTimeSeries. - max_blob_sequence_length (int): - Output only. The largest blob sequence length (number of - blobs) of all data points in this time series, if its - ValueType is BLOB_SEQUENCE. 
- """ - - max_step = proto.Field( - proto.INT64, - number=1, - ) - max_wall_time = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - max_blob_sequence_length = proto.Field( - proto.INT64, - number=3, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - description = proto.Field( - proto.STRING, - number=3, - ) - value_type = proto.Field( - proto.ENUM, - number=4, - enum=ValueType, - ) - create_time = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=6, - message=timestamp_pb2.Timestamp, - ) - etag = proto.Field( - proto.STRING, - number=7, - ) - plugin_name = proto.Field( - proto.STRING, - number=8, - ) - plugin_data = proto.Field( - proto.BYTES, - number=9, - ) - metadata = proto.Field( - proto.MESSAGE, - number=10, - message=Metadata, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/training_pipeline.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/training_pipeline.py deleted file mode 100644 index 07bc0ed752..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/training_pipeline.py +++ /dev/null @@ -1,633 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec -from google.cloud.aiplatform_v1beta1.types import io -from google.cloud.aiplatform_v1beta1.types import model -from google.cloud.aiplatform_v1beta1.types import pipeline_state -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'TrainingPipeline', - 'InputDataConfig', - 'FractionSplit', - 'FilterSplit', - 'PredefinedSplit', - 'TimestampSplit', - 'StratifiedSplit', - }, -) - - -class TrainingPipeline(proto.Message): - r"""The TrainingPipeline orchestrates tasks associated with training a - Model. It always executes the training task, and optionally may also - export data from Vertex AI's Dataset which becomes the training - input, - [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] - the Model to Vertex AI, and evaluate the Model. - - Attributes: - name (str): - Output only. Resource name of the - TrainingPipeline. - display_name (str): - Required. The user-defined name of this - TrainingPipeline. - input_data_config (google.cloud.aiplatform_v1beta1.types.InputDataConfig): - Specifies Vertex AI owned input data that may be used for - training the Model. The TrainingPipeline's - [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition] - should make clear whether this config is used and if there - are any special requirements on how it should be filled. If - nothing about this config is mentioned in the - [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition], - then it should be assumed that the TrainingPipeline does not - depend on this configuration. - training_task_definition (str): - Required. 
A Google Cloud Storage path to the - YAML file that defines the training task which - is responsible for producing the model artifact, - and may also include additional auxiliary work. - The definition files that can be used here are - found in gs://google-cloud- - aiplatform/schema/trainingjob/definition/. Note: - The URI given on output will be immutable and - probably different, including the URI scheme, - than the one given on input. The output URI will - point to a location where the user only has a - read access. - training_task_inputs (google.protobuf.struct_pb2.Value): - Required. The training task's parameter(s), as specified in - the - [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]'s - ``inputs``. - training_task_metadata (google.protobuf.struct_pb2.Value): - Output only. The metadata information as specified in the - [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]'s - ``metadata``. This metadata is an auxiliary runtime and - final information about the training task. While the - pipeline is running this information is populated only at a - best effort basis. Only present if the pipeline's - [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition] - contains ``metadata`` object. - model_to_upload (google.cloud.aiplatform_v1beta1.types.Model): - Describes the Model that may be uploaded (via - [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel]) - by this TrainingPipeline. The TrainingPipeline's - [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition] - should make clear whether this Model description should be - populated, and if there are any special requirements - regarding how it should be filled. 
If nothing is mentioned - in the - [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition], - then it should be assumed that this field should not be - filled and the training task either uploads the Model - without a need of this information, or that training task - does not support uploading a Model as part of the pipeline. - When the Pipeline's state becomes - ``PIPELINE_STATE_SUCCEEDED`` and the trained Model had been - uploaded into Vertex AI, then the model_to_upload's resource - [name][google.cloud.aiplatform.v1beta1.Model.name] is - populated. The Model is always uploaded into the Project and - Location in which this pipeline is. - state (google.cloud.aiplatform_v1beta1.types.PipelineState): - Output only. The detailed state of the - pipeline. - error (google.rpc.status_pb2.Status): - Output only. Only populated when the pipeline's state is - ``PIPELINE_STATE_FAILED`` or ``PIPELINE_STATE_CANCELLED``. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the TrainingPipeline - was created. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the TrainingPipeline for the first - time entered the ``PIPELINE_STATE_RUNNING`` state. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the TrainingPipeline entered any of - the following states: ``PIPELINE_STATE_SUCCEEDED``, - ``PIPELINE_STATE_FAILED``, ``PIPELINE_STATE_CANCELLED``. - update_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. Time when the TrainingPipeline - was most recently updated. - labels (Sequence[google.cloud.aiplatform_v1beta1.types.TrainingPipeline.LabelsEntry]): - The labels with user-defined metadata to - organize TrainingPipelines. - Label keys and values can be no longer than 64 - characters (Unicode codepoints), can only - contain lowercase letters, numeric characters, - underscores and dashes. International characters - are allowed. 
- See https://goo.gl/xmQnxf for more information - and examples of labels. - encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec): - Customer-managed encryption key spec for a TrainingPipeline. - If set, this TrainingPipeline will be secured by this key. - - Note: Model trained by this TrainingPipeline is also secured - by this key if - [model_to_upload][google.cloud.aiplatform.v1beta1.TrainingPipeline.encryption_spec] - is not set separately. - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - input_data_config = proto.Field( - proto.MESSAGE, - number=3, - message='InputDataConfig', - ) - training_task_definition = proto.Field( - proto.STRING, - number=4, - ) - training_task_inputs = proto.Field( - proto.MESSAGE, - number=5, - message=struct_pb2.Value, - ) - training_task_metadata = proto.Field( - proto.MESSAGE, - number=6, - message=struct_pb2.Value, - ) - model_to_upload = proto.Field( - proto.MESSAGE, - number=7, - message=model.Model, - ) - state = proto.Field( - proto.ENUM, - number=9, - enum=pipeline_state.PipelineState, - ) - error = proto.Field( - proto.MESSAGE, - number=10, - message=status_pb2.Status, - ) - create_time = proto.Field( - proto.MESSAGE, - number=11, - message=timestamp_pb2.Timestamp, - ) - start_time = proto.Field( - proto.MESSAGE, - number=12, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=13, - message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, - number=14, - message=timestamp_pb2.Timestamp, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=15, - ) - encryption_spec = proto.Field( - proto.MESSAGE, - number=18, - message=gca_encryption_spec.EncryptionSpec, - ) - - -class InputDataConfig(proto.Message): - r"""Specifies Vertex AI owned input data to be used for training, - and possibly evaluating, the Model. 
- - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - fraction_split (google.cloud.aiplatform_v1beta1.types.FractionSplit): - Split based on fractions defining the size of - each set. - - This field is a member of `oneof`_ ``split``. - filter_split (google.cloud.aiplatform_v1beta1.types.FilterSplit): - Split based on the provided filters for each - set. - - This field is a member of `oneof`_ ``split``. - predefined_split (google.cloud.aiplatform_v1beta1.types.PredefinedSplit): - Supported only for tabular Datasets. - Split based on a predefined key. - - This field is a member of `oneof`_ ``split``. - timestamp_split (google.cloud.aiplatform_v1beta1.types.TimestampSplit): - Supported only for tabular Datasets. - Split based on the timestamp of the input data - pieces. - - This field is a member of `oneof`_ ``split``. - stratified_split (google.cloud.aiplatform_v1beta1.types.StratifiedSplit): - Supported only for tabular Datasets. - Split based on the distribution of the specified - column. - - This field is a member of `oneof`_ ``split``. - gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination): - The Cloud Storage location where the training data is to be - written to. In the given directory a new directory is - created with name: - ``dataset---`` - where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 - format. All training input data is written into that - directory. - - The Vertex AI environment variables representing Cloud - Storage data URIs are represented in the Cloud Storage - wildcard format to support sharded data. 
e.g.: - "gs://.../training-*.jsonl" - - - AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for - tabular data - - - AIP_TRAINING_DATA_URI = - "gcs_destination/dataset---/training-*.${AIP_DATA_FORMAT}" - - - AIP_VALIDATION_DATA_URI = - "gcs_destination/dataset---/validation-*.${AIP_DATA_FORMAT}" - - - AIP_TEST_DATA_URI = - "gcs_destination/dataset---/test-*.${AIP_DATA_FORMAT}". - - This field is a member of `oneof`_ ``destination``. - bigquery_destination (google.cloud.aiplatform_v1beta1.types.BigQueryDestination): - Only applicable to custom training with tabular Dataset with - BigQuery source. - - The BigQuery project location where the training data is to - be written to. In the given project a new dataset is created - with name - ``dataset___`` - where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All - training input data is written into that dataset. In the - dataset three tables are created, ``training``, - ``validation`` and ``test``. - - - AIP_DATA_FORMAT = "bigquery". - - - AIP_TRAINING_DATA_URI = - "bigquery_destination.dataset\_\ **\ .training" - - - AIP_VALIDATION_DATA_URI = - "bigquery_destination.dataset\_\ **\ .validation" - - - AIP_TEST_DATA_URI = - "bigquery_destination.dataset\_\ **\ .test". - - This field is a member of `oneof`_ ``destination``. - dataset_id (str): - Required. The ID of the Dataset in the same Project and - Location which data will be used to train the Model. The - Dataset must use schema compatible with Model being trained, - and what is compatible should be described in the used - TrainingPipeline's [training_task_definition] - [google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]. - For tabular Datasets, all their data is exported to - training, to pick and choose from. - annotations_filter (str): - Applicable only to Datasets that have DataItems and - Annotations. - - A filter on Annotations of the Dataset. 
Only Annotations - that both match this filter and belong to DataItems not - ignored by the split method are used in respectively - training, validation or test role, depending on the role of - the DataItem they are on (for the auto-assigned that role is - decided by Vertex AI). A filter with same syntax as the one - used in - [ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations] - may be used, but note here it filters across all Annotations - of the Dataset, and not just within a single DataItem. - annotation_schema_uri (str): - Applicable only to custom training with Datasets that have - DataItems and Annotations. - - Cloud Storage URI that points to a YAML file describing the - annotation schema. The schema is defined as an OpenAPI 3.0.2 - `Schema - Object `__. - The schema files that can be used here are found in - gs://google-cloud-aiplatform/schema/dataset/annotation/ , - note that the chosen schema must be consistent with - [metadata][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri] - of the Dataset specified by - [dataset_id][google.cloud.aiplatform.v1beta1.InputDataConfig.dataset_id]. - - Only Annotations that both match this schema and belong to - DataItems not ignored by the split method are used in - respectively training, validation or test role, depending on - the role of the DataItem they are on. - - When used in conjunction with - [annotations_filter][google.cloud.aiplatform.v1beta1.InputDataConfig.annotations_filter], - the Annotations used for training are filtered by both - [annotations_filter][google.cloud.aiplatform.v1beta1.InputDataConfig.annotations_filter] - and - [annotation_schema_uri][google.cloud.aiplatform.v1beta1.InputDataConfig.annotation_schema_uri]. 
- """ - - fraction_split = proto.Field( - proto.MESSAGE, - number=2, - oneof='split', - message='FractionSplit', - ) - filter_split = proto.Field( - proto.MESSAGE, - number=3, - oneof='split', - message='FilterSplit', - ) - predefined_split = proto.Field( - proto.MESSAGE, - number=4, - oneof='split', - message='PredefinedSplit', - ) - timestamp_split = proto.Field( - proto.MESSAGE, - number=5, - oneof='split', - message='TimestampSplit', - ) - stratified_split = proto.Field( - proto.MESSAGE, - number=12, - oneof='split', - message='StratifiedSplit', - ) - gcs_destination = proto.Field( - proto.MESSAGE, - number=8, - oneof='destination', - message=io.GcsDestination, - ) - bigquery_destination = proto.Field( - proto.MESSAGE, - number=10, - oneof='destination', - message=io.BigQueryDestination, - ) - dataset_id = proto.Field( - proto.STRING, - number=1, - ) - annotations_filter = proto.Field( - proto.STRING, - number=6, - ) - annotation_schema_uri = proto.Field( - proto.STRING, - number=9, - ) - - -class FractionSplit(proto.Message): - r"""Assigns the input data to training, validation, and test sets as per - the given fractions. Any of ``training_fraction``, - ``validation_fraction`` and ``test_fraction`` may optionally be - provided, they must sum to up to 1. If the provided ones sum to less - than 1, the remainder is assigned to sets as decided by Vertex AI. - If none of the fractions are set, by default roughly 80% of data is - used for training, 10% for validation, and 10% for test. - - Attributes: - training_fraction (float): - The fraction of the input data that is to be - used to train the Model. - validation_fraction (float): - The fraction of the input data that is to be - used to validate the Model. - test_fraction (float): - The fraction of the input data that is to be - used to evaluate the Model. 
- """ - - training_fraction = proto.Field( - proto.DOUBLE, - number=1, - ) - validation_fraction = proto.Field( - proto.DOUBLE, - number=2, - ) - test_fraction = proto.Field( - proto.DOUBLE, - number=3, - ) - - -class FilterSplit(proto.Message): - r"""Assigns input data to training, validation, and test sets - based on the given filters, data pieces not matched by any - filter are ignored. Currently only supported for Datasets - containing DataItems. - If any of the filters in this message are to match nothing, then - they can be set as '-' (the minus sign). - - Supported only for unstructured Datasets. - - Attributes: - training_filter (str): - Required. A filter on DataItems of the Dataset. DataItems - that match this filter are used to train the Model. A filter - with same syntax as the one used in - [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems] - may be used. If a single DataItem is matched by more than - one of the FilterSplit filters, then it is assigned to the - first set that applies to it in the training, validation, - test order. - validation_filter (str): - Required. A filter on DataItems of the Dataset. DataItems - that match this filter are used to validate the Model. A - filter with same syntax as the one used in - [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems] - may be used. If a single DataItem is matched by more than - one of the FilterSplit filters, then it is assigned to the - first set that applies to it in the training, validation, - test order. - test_filter (str): - Required. A filter on DataItems of the Dataset. DataItems - that match this filter are used to test the Model. A filter - with same syntax as the one used in - [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems] - may be used. 
If a single DataItem is matched by more than - one of the FilterSplit filters, then it is assigned to the - first set that applies to it in the training, validation, - test order. - """ - - training_filter = proto.Field( - proto.STRING, - number=1, - ) - validation_filter = proto.Field( - proto.STRING, - number=2, - ) - test_filter = proto.Field( - proto.STRING, - number=3, - ) - - -class PredefinedSplit(proto.Message): - r"""Assigns input data to training, validation, and test sets - based on the value of a provided key. - - Supported only for tabular Datasets. - - Attributes: - key (str): - Required. The key is a name of one of the Dataset's data - columns. The value of the key (either the label's value or - value in the column) must be one of {``training``, - ``validation``, ``test``}, and it defines to which set the - given piece of data is assigned. If for a piece of data the - key is not present or has an invalid value, that piece is - ignored by the pipeline. - """ - - key = proto.Field( - proto.STRING, - number=1, - ) - - -class TimestampSplit(proto.Message): - r"""Assigns input data to training, validation, and test sets - based on a provided timestamps. The youngest data pieces are - assigned to training set, next to validation set, and the oldest - to the test set. - Supported only for tabular Datasets. - - Attributes: - training_fraction (float): - The fraction of the input data that is to be - used to train the Model. - validation_fraction (float): - The fraction of the input data that is to be - used to validate the Model. - test_fraction (float): - The fraction of the input data that is to be - used to evaluate the Model. - key (str): - Required. The key is a name of one of the Dataset's data - columns. The values of the key (the values in the column) - must be in RFC 3339 ``date-time`` format, where - ``time-offset`` = ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z). 
If - for a piece of data the key is not present or has an invalid - value, that piece is ignored by the pipeline. - """ - - training_fraction = proto.Field( - proto.DOUBLE, - number=1, - ) - validation_fraction = proto.Field( - proto.DOUBLE, - number=2, - ) - test_fraction = proto.Field( - proto.DOUBLE, - number=3, - ) - key = proto.Field( - proto.STRING, - number=4, - ) - - -class StratifiedSplit(proto.Message): - r"""Assigns input data to the training, validation, and test sets so - that the distribution of values found in the categorical column (as - specified by the ``key`` field) is mirrored within each split. The - fraction values determine the relative sizes of the splits. - - For example, if the specified column has three values, with 50% of - the rows having value "A", 25% value "B", and 25% value "C", and the - split fractions are specified as 80/10/10, then the training set - will constitute 80% of the training data, with about 50% of the - training set rows having the value "A" for the specified column, - about 25% having the value "B", and about 25% having the value "C". - - Only the top 500 occurring values are used; any values not in the - top 500 values are randomly assigned to a split. If less than three - rows contain a specific value, those rows are randomly assigned. - - Supported only for tabular Datasets. - - Attributes: - training_fraction (float): - The fraction of the input data that is to be - used to train the Model. - validation_fraction (float): - The fraction of the input data that is to be - used to validate the Model. - test_fraction (float): - The fraction of the input data that is to be - used to evaluate the Model. - key (str): - Required. The key is a name of one of the - Dataset's data columns. The key provided must be - for a categorical column. 
- """ - - training_fraction = proto.Field( - proto.DOUBLE, - number=1, - ) - validation_fraction = proto.Field( - proto.DOUBLE, - number=2, - ) - test_fraction = proto.Field( - proto.DOUBLE, - number=3, - ) - key = proto.Field( - proto.STRING, - number=4, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/types.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/types.py deleted file mode 100644 index 0c475a3bdb..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/types.py +++ /dev/null @@ -1,86 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'BoolArray', - 'DoubleArray', - 'Int64Array', - 'StringArray', - }, -) - - -class BoolArray(proto.Message): - r"""A list of boolean values. - - Attributes: - values (Sequence[bool]): - A list of bool values. - """ - - values = proto.RepeatedField( - proto.BOOL, - number=1, - ) - - -class DoubleArray(proto.Message): - r"""A list of double values. - - Attributes: - values (Sequence[float]): - A list of bool values. - """ - - values = proto.RepeatedField( - proto.DOUBLE, - number=1, - ) - - -class Int64Array(proto.Message): - r"""A list of int64 values. - - Attributes: - values (Sequence[int]): - A list of int64 values. 
- """ - - values = proto.RepeatedField( - proto.INT64, - number=1, - ) - - -class StringArray(proto.Message): - r"""A list of string values. - - Attributes: - values (Sequence[str]): - A list of string values. - """ - - values = proto.RepeatedField( - proto.STRING, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/user_action_reference.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/user_action_reference.py deleted file mode 100644 index 8b7382fdcc..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/user_action_reference.py +++ /dev/null @@ -1,75 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'UserActionReference', - }, -) - - -class UserActionReference(proto.Message): - r"""References an API call. It contains more information about - long running operation and Jobs that are triggered by the API - call. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - operation (str): - For API calls that return a long running - operation. Resource name of the long running - operation. Format: - 'projects/{project}/locations/{location}/operations/{operation}' - - This field is a member of `oneof`_ ``reference``. - data_labeling_job (str): - For API calls that start a LabelingJob. Resource name of the - LabelingJob. Format: - 'projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}' - - This field is a member of `oneof`_ ``reference``. - method (str): - The method name of the API RPC call. For - example, - "/google.cloud.aiplatform.{apiVersion}.DatasetService.CreateDataset". - """ - - operation = proto.Field( - proto.STRING, - number=1, - oneof='reference', - ) - data_labeling_job = proto.Field( - proto.STRING, - number=2, - oneof='reference', - ) - method = proto.Field( - proto.STRING, - number=3, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/value.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/value.py deleted file mode 100644 index c4c0aec465..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/value.py +++ /dev/null @@ -1,69 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'Value', - }, -) - - -class Value(proto.Message): - r"""Value is the value of the field. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - int_value (int): - An integer value. - - This field is a member of `oneof`_ ``value``. - double_value (float): - A double value. - - This field is a member of `oneof`_ ``value``. - string_value (str): - A string value. - - This field is a member of `oneof`_ ``value``. - """ - - int_value = proto.Field( - proto.INT64, - number=1, - oneof='value', - ) - double_value = proto.Field( - proto.DOUBLE, - number=2, - oneof='value', - ) - string_value = proto.Field( - proto.STRING, - number=3, - oneof='value', - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/vizier_service.py b/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/vizier_service.py deleted file mode 100644 index ec447f58e1..0000000000 --- a/owl-bot-staging/v1beta1/google/cloud/aiplatform_v1beta1/types/vizier_service.py +++ /dev/null @@ -1,589 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -import proto # type: ignore - -from google.cloud.aiplatform_v1beta1.types import operation -from google.cloud.aiplatform_v1beta1.types import study as gca_study -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.cloud.aiplatform.v1beta1', - manifest={ - 'GetStudyRequest', - 'CreateStudyRequest', - 'ListStudiesRequest', - 'ListStudiesResponse', - 'DeleteStudyRequest', - 'LookupStudyRequest', - 'SuggestTrialsRequest', - 'SuggestTrialsResponse', - 'SuggestTrialsMetadata', - 'CreateTrialRequest', - 'GetTrialRequest', - 'ListTrialsRequest', - 'ListTrialsResponse', - 'AddTrialMeasurementRequest', - 'CompleteTrialRequest', - 'DeleteTrialRequest', - 'CheckTrialEarlyStoppingStateRequest', - 'CheckTrialEarlyStoppingStateResponse', - 'CheckTrialEarlyStoppingStateMetatdata', - 'StopTrialRequest', - 'ListOptimalTrialsRequest', - 'ListOptimalTrialsResponse', - }, -) - - -class GetStudyRequest(proto.Message): - r"""Request message for - [VizierService.GetStudy][google.cloud.aiplatform.v1beta1.VizierService.GetStudy]. - - Attributes: - name (str): - Required. The name of the Study resource. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateStudyRequest(proto.Message): - r"""Request message for - [VizierService.CreateStudy][google.cloud.aiplatform.v1beta1.VizierService.CreateStudy]. - - Attributes: - parent (str): - Required. The resource name of the Location to create the - CustomJob in. Format: - ``projects/{project}/locations/{location}`` - study (google.cloud.aiplatform_v1beta1.types.Study): - Required. The Study configuration used to - create the Study. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - study = proto.Field( - proto.MESSAGE, - number=2, - message=gca_study.Study, - ) - - -class ListStudiesRequest(proto.Message): - r"""Request message for - [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies]. - - Attributes: - parent (str): - Required. The resource name of the Location to list the - Study from. Format: - ``projects/{project}/locations/{location}`` - page_token (str): - Optional. A page token to request the next - page of results. If unspecified, there are no - subsequent pages. - page_size (int): - Optional. The maximum number of studies to - return per "page" of results. If unspecified, - service will pick an appropriate default. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_token = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - - -class ListStudiesResponse(proto.Message): - r"""Response message for - [VizierService.ListStudies][google.cloud.aiplatform.v1beta1.VizierService.ListStudies]. - - Attributes: - studies (Sequence[google.cloud.aiplatform_v1beta1.types.Study]): - The studies associated with the project. - next_page_token (str): - Passes this token as the ``page_token`` field of the request - for a subsequent call. If this field is omitted, there are - no subsequent pages. - """ - - @property - def raw_page(self): - return self - - studies = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_study.Study, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteStudyRequest(proto.Message): - r"""Request message for - [VizierService.DeleteStudy][google.cloud.aiplatform.v1beta1.VizierService.DeleteStudy]. - - Attributes: - name (str): - Required. The name of the Study resource to be deleted. 
- Format: - ``projects/{project}/locations/{location}/studies/{study}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class LookupStudyRequest(proto.Message): - r"""Request message for - [VizierService.LookupStudy][google.cloud.aiplatform.v1beta1.VizierService.LookupStudy]. - - Attributes: - parent (str): - Required. The resource name of the Location to get the Study - from. Format: ``projects/{project}/locations/{location}`` - display_name (str): - Required. The user-defined display name of - the Study - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - display_name = proto.Field( - proto.STRING, - number=2, - ) - - -class SuggestTrialsRequest(proto.Message): - r"""Request message for - [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials]. - - Attributes: - parent (str): - Required. The project and location that the Study belongs - to. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - suggestion_count (int): - Required. The number of suggestions - requested. - client_id (str): - Required. The identifier of the client that is requesting - the suggestion. - - If multiple SuggestTrialsRequests have the same - ``client_id``, the service will return the identical - suggested Trial if the Trial is pending, and provide a new - Trial if the last suggested Trial was completed. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - suggestion_count = proto.Field( - proto.INT32, - number=2, - ) - client_id = proto.Field( - proto.STRING, - number=3, - ) - - -class SuggestTrialsResponse(proto.Message): - r"""Response message for - [VizierService.SuggestTrials][google.cloud.aiplatform.v1beta1.VizierService.SuggestTrials]. - - Attributes: - trials (Sequence[google.cloud.aiplatform_v1beta1.types.Trial]): - A list of Trials. - study_state (google.cloud.aiplatform_v1beta1.types.Study.State): - The state of the Study. 
- start_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the operation was started. - end_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which operation processing - completed. - """ - - trials = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_study.Trial, - ) - study_state = proto.Field( - proto.ENUM, - number=2, - enum=gca_study.Study.State, - ) - start_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - - -class SuggestTrialsMetadata(proto.Message): - r"""Details of operations that perform Trials suggestion. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for suggesting Trials. - client_id (str): - The identifier of the client that is requesting the - suggestion. - - If multiple SuggestTrialsRequests have the same - ``client_id``, the service will return the identical - suggested Trial if the Trial is pending, and provide a new - Trial if the last suggested Trial was completed. - """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - client_id = proto.Field( - proto.STRING, - number=2, - ) - - -class CreateTrialRequest(proto.Message): - r"""Request message for - [VizierService.CreateTrial][google.cloud.aiplatform.v1beta1.VizierService.CreateTrial]. - - Attributes: - parent (str): - Required. The resource name of the Study to create the Trial - in. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - trial (google.cloud.aiplatform_v1beta1.types.Trial): - Required. The Trial to create. 
- """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - trial = proto.Field( - proto.MESSAGE, - number=2, - message=gca_study.Trial, - ) - - -class GetTrialRequest(proto.Message): - r"""Request message for - [VizierService.GetTrial][google.cloud.aiplatform.v1beta1.VizierService.GetTrial]. - - Attributes: - name (str): - Required. The name of the Trial resource. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListTrialsRequest(proto.Message): - r"""Request message for - [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials]. - - Attributes: - parent (str): - Required. The resource name of the Study to list the Trial - from. Format: - ``projects/{project}/locations/{location}/studies/{study}`` - page_token (str): - Optional. A page token to request the next - page of results. If unspecified, there are no - subsequent pages. - page_size (int): - Optional. The number of Trials to retrieve - per "page" of results. If unspecified, the - service will pick an appropriate default. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - page_token = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - - -class ListTrialsResponse(proto.Message): - r"""Response message for - [VizierService.ListTrials][google.cloud.aiplatform.v1beta1.VizierService.ListTrials]. - - Attributes: - trials (Sequence[google.cloud.aiplatform_v1beta1.types.Trial]): - The Trials associated with the Study. - next_page_token (str): - Pass this token as the ``page_token`` field of the request - for a subsequent call. If this field is omitted, there are - no subsequent pages. 
- """ - - @property - def raw_page(self): - return self - - trials = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_study.Trial, - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class AddTrialMeasurementRequest(proto.Message): - r"""Request message for - [VizierService.AddTrialMeasurement][google.cloud.aiplatform.v1beta1.VizierService.AddTrialMeasurement]. - - Attributes: - trial_name (str): - Required. The name of the trial to add measurement. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - measurement (google.cloud.aiplatform_v1beta1.types.Measurement): - Required. The measurement to be added to a - Trial. - """ - - trial_name = proto.Field( - proto.STRING, - number=1, - ) - measurement = proto.Field( - proto.MESSAGE, - number=3, - message=gca_study.Measurement, - ) - - -class CompleteTrialRequest(proto.Message): - r"""Request message for - [VizierService.CompleteTrial][google.cloud.aiplatform.v1beta1.VizierService.CompleteTrial]. - - Attributes: - name (str): - Required. The Trial's name. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - final_measurement (google.cloud.aiplatform_v1beta1.types.Measurement): - Optional. If provided, it will be used as the completed - Trial's final_measurement; Otherwise, the service will - auto-select a previously reported measurement as the - final-measurement - trial_infeasible (bool): - Optional. True if the Trial cannot be run with the given - Parameter, and final_measurement will be ignored. - infeasible_reason (str): - Optional. A human readable reason why the trial was - infeasible. This should only be provided if - ``trial_infeasible`` is true. 
- """ - - name = proto.Field( - proto.STRING, - number=1, - ) - final_measurement = proto.Field( - proto.MESSAGE, - number=2, - message=gca_study.Measurement, - ) - trial_infeasible = proto.Field( - proto.BOOL, - number=3, - ) - infeasible_reason = proto.Field( - proto.STRING, - number=4, - ) - - -class DeleteTrialRequest(proto.Message): - r"""Request message for - [VizierService.DeleteTrial][google.cloud.aiplatform.v1beta1.VizierService.DeleteTrial]. - - Attributes: - name (str): - Required. The Trial's name. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class CheckTrialEarlyStoppingStateRequest(proto.Message): - r"""Request message for - [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState]. - - Attributes: - trial_name (str): - Required. The Trial's name. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - """ - - trial_name = proto.Field( - proto.STRING, - number=1, - ) - - -class CheckTrialEarlyStoppingStateResponse(proto.Message): - r"""Response message for - [VizierService.CheckTrialEarlyStoppingState][google.cloud.aiplatform.v1beta1.VizierService.CheckTrialEarlyStoppingState]. - - Attributes: - should_stop (bool): - True if the Trial should stop. - """ - - should_stop = proto.Field( - proto.BOOL, - number=1, - ) - - -class CheckTrialEarlyStoppingStateMetatdata(proto.Message): - r"""This message will be placed in the metadata field of a - google.longrunning.Operation associated with a - CheckTrialEarlyStoppingState request. - - Attributes: - generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): - Operation metadata for suggesting Trials. - study (str): - The name of the Study that the Trial belongs - to. - trial (str): - The Trial name. 
- """ - - generic_metadata = proto.Field( - proto.MESSAGE, - number=1, - message=operation.GenericOperationMetadata, - ) - study = proto.Field( - proto.STRING, - number=2, - ) - trial = proto.Field( - proto.STRING, - number=3, - ) - - -class StopTrialRequest(proto.Message): - r"""Request message for - [VizierService.StopTrial][google.cloud.aiplatform.v1beta1.VizierService.StopTrial]. - - Attributes: - name (str): - Required. The Trial's name. Format: - ``projects/{project}/locations/{location}/studies/{study}/trials/{trial}`` - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListOptimalTrialsRequest(proto.Message): - r"""Request message for - [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials]. - - Attributes: - parent (str): - Required. The name of the Study that the - optimal Trial belongs to. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - - -class ListOptimalTrialsResponse(proto.Message): - r"""Response message for - [VizierService.ListOptimalTrials][google.cloud.aiplatform.v1beta1.VizierService.ListOptimalTrials]. - - Attributes: - optimal_trials (Sequence[google.cloud.aiplatform_v1beta1.types.Trial]): - The pareto-optimal Trials for multiple objective Study or - the optimal trial for single objective Study. The definition - of pareto-optimal can be checked in wiki page. 
- https://en.wikipedia.org/wiki/Pareto_efficiency - """ - - optimal_trials = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gca_study.Trial, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/mypy.ini b/owl-bot-staging/v1beta1/mypy.ini deleted file mode 100644 index 4505b48543..0000000000 --- a/owl-bot-staging/v1beta1/mypy.ini +++ /dev/null @@ -1,3 +0,0 @@ -[mypy] -python_version = 3.6 -namespace_packages = True diff --git a/owl-bot-staging/v1beta1/noxfile.py b/owl-bot-staging/v1beta1/noxfile.py deleted file mode 100644 index 32d26332af..0000000000 --- a/owl-bot-staging/v1beta1/noxfile.py +++ /dev/null @@ -1,132 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import pathlib -import shutil -import subprocess -import sys - - -import nox # type: ignore - -CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() - -LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" -PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") - - -nox.sessions = [ - "unit", - "cover", - "mypy", - "check_lower_bounds" - # exclude update_lower_bounds from default - "docs", -] - -@nox.session(python=['3.6', '3.7', '3.8', '3.9', '3.10']) -def unit(session): - """Run the unit test suite.""" - - session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio') - session.install('-e', '.') - - session.run( - 'py.test', - '--quiet', - '--cov=google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/', - '--cov-config=.coveragerc', - '--cov-report=term', - '--cov-report=html', - os.path.join('tests', 'unit', ''.join(session.posargs)) - ) - - -@nox.session(python='3.9') -def cover(session): - """Run the final coverage report. - This outputs the coverage report aggregating coverage from the unit - test runs (not system test runs), and then erases coverage data. 
- """ - session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=100") - - session.run("coverage", "erase") - - -@nox.session(python=['3.6', '3.7', '3.8', '3.9']) -def mypy(session): - """Run the type checker.""" - session.install('mypy', 'types-pkg_resources') - session.install('.') - session.run( - 'mypy', - '--explicit-package-bases', - 'google', - ) - - -@nox.session -def update_lower_bounds(session): - """Update lower bounds in constraints.txt to match setup.py""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'update', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - - -@nox.session -def check_lower_bounds(session): - """Check lower bounds in setup.py are reflected in constraints file""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'check', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - -@nox.session(python='3.9') -def docs(session): - """Build the docs for this library.""" - - session.install("-e", ".") - session.install("sphinx<3.0.0", "alabaster", "recommonmark") - - shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) - session.run( - "sphinx-build", - "-W", # warnings as errors - "-T", # show full traceback on exception - "-N", # no colors - "-b", - "html", - "-d", - os.path.join("docs", "_build", "doctrees", ""), - os.path.join("docs", ""), - os.path.join("docs", "_build", "html", ""), - ) diff --git a/owl-bot-staging/v1beta1/scripts/fixup_aiplatform_v1beta1_keywords.py b/owl-bot-staging/v1beta1/scripts/fixup_aiplatform_v1beta1_keywords.py deleted file mode 100644 index 9946d392bb..0000000000 --- a/owl-bot-staging/v1beta1/scripts/fixup_aiplatform_v1beta1_keywords.py +++ /dev/null @@ -1,359 +0,0 @@ -#! 
/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class aiplatformCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - 'add_context_artifacts_and_executions': ('context', 'artifacts', 'executions', ), - 'add_context_children': ('context', 'child_contexts', ), - 'add_execution_events': ('execution', 'events', ), - 'add_trial_measurement': ('trial_name', 'measurement', ), - 'batch_create_features': ('parent', 'requests', ), - 'batch_create_tensorboard_runs': ('parent', 'requests', ), - 'batch_create_tensorboard_time_series': ('parent', 'requests', ), - 'batch_migrate_resources': ('parent', 'migrate_resource_requests', ), - 'batch_read_feature_values': ('featurestore', 'destination', 'entity_type_specs', 'csv_read_instances', 'bigquery_read_instances', 'pass_through_fields', ), - 'batch_read_tensorboard_time_series_data': ('tensorboard', 'time_series', ), - 
'cancel_batch_prediction_job': ('name', ), - 'cancel_custom_job': ('name', ), - 'cancel_data_labeling_job': ('name', ), - 'cancel_hyperparameter_tuning_job': ('name', ), - 'cancel_pipeline_job': ('name', ), - 'cancel_training_pipeline': ('name', ), - 'check_trial_early_stopping_state': ('trial_name', ), - 'complete_trial': ('name', 'final_measurement', 'trial_infeasible', 'infeasible_reason', ), - 'create_artifact': ('parent', 'artifact', 'artifact_id', ), - 'create_batch_prediction_job': ('parent', 'batch_prediction_job', ), - 'create_context': ('parent', 'context', 'context_id', ), - 'create_custom_job': ('parent', 'custom_job', ), - 'create_data_labeling_job': ('parent', 'data_labeling_job', ), - 'create_dataset': ('parent', 'dataset', ), - 'create_endpoint': ('parent', 'endpoint', 'endpoint_id', ), - 'create_entity_type': ('parent', 'entity_type_id', 'entity_type', ), - 'create_execution': ('parent', 'execution', 'execution_id', ), - 'create_feature': ('parent', 'feature', 'feature_id', ), - 'create_featurestore': ('parent', 'featurestore', 'featurestore_id', ), - 'create_hyperparameter_tuning_job': ('parent', 'hyperparameter_tuning_job', ), - 'create_index': ('parent', 'index', ), - 'create_index_endpoint': ('parent', 'index_endpoint', ), - 'create_metadata_schema': ('parent', 'metadata_schema', 'metadata_schema_id', ), - 'create_metadata_store': ('parent', 'metadata_store', 'metadata_store_id', ), - 'create_model_deployment_monitoring_job': ('parent', 'model_deployment_monitoring_job', ), - 'create_pipeline_job': ('parent', 'pipeline_job', 'pipeline_job_id', ), - 'create_specialist_pool': ('parent', 'specialist_pool', ), - 'create_study': ('parent', 'study', ), - 'create_tensorboard': ('parent', 'tensorboard', ), - 'create_tensorboard_experiment': ('parent', 'tensorboard_experiment_id', 'tensorboard_experiment', ), - 'create_tensorboard_run': ('parent', 'tensorboard_run', 'tensorboard_run_id', ), - 'create_tensorboard_time_series': ('parent', 
'tensorboard_time_series', 'tensorboard_time_series_id', ), - 'create_training_pipeline': ('parent', 'training_pipeline', ), - 'create_trial': ('parent', 'trial', ), - 'delete_artifact': ('name', 'etag', ), - 'delete_batch_prediction_job': ('name', ), - 'delete_context': ('name', 'force', 'etag', ), - 'delete_custom_job': ('name', ), - 'delete_data_labeling_job': ('name', ), - 'delete_dataset': ('name', ), - 'delete_endpoint': ('name', ), - 'delete_entity_type': ('name', 'force', ), - 'delete_execution': ('name', 'etag', ), - 'delete_feature': ('name', ), - 'delete_featurestore': ('name', 'force', ), - 'delete_hyperparameter_tuning_job': ('name', ), - 'delete_index': ('name', ), - 'delete_index_endpoint': ('name', ), - 'delete_metadata_store': ('name', 'force', ), - 'delete_model': ('name', ), - 'delete_model_deployment_monitoring_job': ('name', ), - 'delete_pipeline_job': ('name', ), - 'delete_specialist_pool': ('name', 'force', ), - 'delete_study': ('name', ), - 'delete_tensorboard': ('name', ), - 'delete_tensorboard_experiment': ('name', ), - 'delete_tensorboard_run': ('name', ), - 'delete_tensorboard_time_series': ('name', ), - 'delete_training_pipeline': ('name', ), - 'delete_trial': ('name', ), - 'deploy_index': ('index_endpoint', 'deployed_index', ), - 'deploy_model': ('endpoint', 'deployed_model', 'traffic_split', ), - 'explain': ('endpoint', 'instances', 'parameters', 'explanation_spec_override', 'deployed_model_id', ), - 'export_data': ('name', 'export_config', ), - 'export_feature_values': ('entity_type', 'destination', 'feature_selector', 'snapshot_export', 'full_export', 'settings', ), - 'export_model': ('name', 'output_config', ), - 'export_tensorboard_time_series_data': ('tensorboard_time_series', 'filter', 'page_size', 'page_token', 'order_by', ), - 'get_annotation_spec': ('name', 'read_mask', ), - 'get_artifact': ('name', ), - 'get_batch_prediction_job': ('name', ), - 'get_context': ('name', ), - 'get_custom_job': ('name', ), - 
'get_data_labeling_job': ('name', ), - 'get_dataset': ('name', 'read_mask', ), - 'get_endpoint': ('name', ), - 'get_entity_type': ('name', ), - 'get_execution': ('name', ), - 'get_feature': ('name', ), - 'get_featurestore': ('name', ), - 'get_hyperparameter_tuning_job': ('name', ), - 'get_index': ('name', ), - 'get_index_endpoint': ('name', ), - 'get_metadata_schema': ('name', ), - 'get_metadata_store': ('name', ), - 'get_model': ('name', ), - 'get_model_deployment_monitoring_job': ('name', ), - 'get_model_evaluation': ('name', ), - 'get_model_evaluation_slice': ('name', ), - 'get_pipeline_job': ('name', ), - 'get_specialist_pool': ('name', ), - 'get_study': ('name', ), - 'get_tensorboard': ('name', ), - 'get_tensorboard_experiment': ('name', ), - 'get_tensorboard_run': ('name', ), - 'get_tensorboard_time_series': ('name', ), - 'get_training_pipeline': ('name', ), - 'get_trial': ('name', ), - 'import_data': ('name', 'import_configs', ), - 'import_feature_values': ('entity_type', 'feature_specs', 'avro_source', 'bigquery_source', 'csv_source', 'feature_time_field', 'feature_time', 'entity_id_field', 'disable_online_serving', 'worker_count', ), - 'list_annotations': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), - 'list_artifacts': ('parent', 'page_size', 'page_token', 'filter', ), - 'list_batch_prediction_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_contexts': ('parent', 'page_size', 'page_token', 'filter', ), - 'list_custom_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_data_items': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), - 'list_data_labeling_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), - 'list_datasets': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', 'order_by', ), - 'list_endpoints': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_entity_types': ('parent', 
'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), - 'list_executions': ('parent', 'page_size', 'page_token', 'filter', ), - 'list_features': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', 'latest_stats_count', ), - 'list_featurestores': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), - 'list_hyperparameter_tuning_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_index_endpoints': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_indexes': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_metadata_schemas': ('parent', 'page_size', 'page_token', 'filter', ), - 'list_metadata_stores': ('parent', 'page_size', 'page_token', ), - 'list_model_deployment_monitoring_jobs': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_model_evaluations': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_model_evaluation_slices': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_models': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_optimal_trials': ('parent', ), - 'list_pipeline_jobs': ('parent', 'filter', 'page_size', 'page_token', 'order_by', ), - 'list_specialist_pools': ('parent', 'page_size', 'page_token', 'read_mask', ), - 'list_studies': ('parent', 'page_token', 'page_size', ), - 'list_tensorboard_experiments': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), - 'list_tensorboard_runs': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), - 'list_tensorboards': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), - 'list_tensorboard_time_series': ('parent', 'filter', 'page_size', 'page_token', 'order_by', 'read_mask', ), - 'list_training_pipelines': ('parent', 'filter', 'page_size', 'page_token', 'read_mask', ), - 'list_trials': ('parent', 'page_token', 'page_size', ), - 
'lookup_study': ('parent', 'display_name', ), - 'mutate_deployed_index': ('index_endpoint', 'deployed_index', ), - 'pause_model_deployment_monitoring_job': ('name', ), - 'predict': ('endpoint', 'instances', 'parameters', ), - 'purge_artifacts': ('parent', 'filter', 'force', ), - 'purge_contexts': ('parent', 'filter', 'force', ), - 'purge_executions': ('parent', 'filter', 'force', ), - 'query_artifact_lineage_subgraph': ('artifact', 'max_hops', 'filter', ), - 'query_context_lineage_subgraph': ('context', ), - 'query_execution_inputs_and_outputs': ('execution', ), - 'raw_predict': ('endpoint', 'http_body', ), - 'read_feature_values': ('entity_type', 'entity_id', 'feature_selector', ), - 'read_tensorboard_blob_data': ('time_series', 'blob_ids', ), - 'read_tensorboard_time_series_data': ('tensorboard_time_series', 'max_data_points', 'filter', ), - 'resume_model_deployment_monitoring_job': ('name', ), - 'search_features': ('location', 'query', 'page_size', 'page_token', ), - 'search_migratable_resources': ('parent', 'page_size', 'page_token', 'filter', ), - 'search_model_deployment_monitoring_stats_anomalies': ('model_deployment_monitoring_job', 'deployed_model_id', 'objectives', 'feature_display_name', 'page_size', 'page_token', 'start_time', 'end_time', ), - 'stop_trial': ('name', ), - 'streaming_read_feature_values': ('entity_type', 'entity_ids', 'feature_selector', ), - 'suggest_trials': ('parent', 'suggestion_count', 'client_id', ), - 'undeploy_index': ('index_endpoint', 'deployed_index_id', ), - 'undeploy_model': ('endpoint', 'deployed_model_id', 'traffic_split', ), - 'update_artifact': ('artifact', 'update_mask', 'allow_missing', ), - 'update_context': ('context', 'update_mask', 'allow_missing', ), - 'update_dataset': ('dataset', 'update_mask', ), - 'update_endpoint': ('endpoint', 'update_mask', ), - 'update_entity_type': ('entity_type', 'update_mask', ), - 'update_execution': ('execution', 'update_mask', 'allow_missing', ), - 'update_feature': ('feature', 
'update_mask', ), - 'update_featurestore': ('featurestore', 'update_mask', ), - 'update_index': ('index', 'update_mask', ), - 'update_index_endpoint': ('index_endpoint', 'update_mask', ), - 'update_model': ('model', 'update_mask', ), - 'update_model_deployment_monitoring_job': ('model_deployment_monitoring_job', 'update_mask', ), - 'update_specialist_pool': ('specialist_pool', 'update_mask', ), - 'update_tensorboard': ('update_mask', 'tensorboard', ), - 'update_tensorboard_experiment': ('update_mask', 'tensorboard_experiment', ), - 'update_tensorboard_run': ('update_mask', 'tensorboard_run', ), - 'update_tensorboard_time_series': ('update_mask', 'tensorboard_time_series', ), - 'upload_model': ('parent', 'model', ), - 'write_tensorboard_experiment_data': ('tensorboard_experiment', 'write_run_data_requests', ), - 'write_tensorboard_run_data': ('tensorboard_run', 'time_series_data', ), - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. 
- return updated - - kwargs, ctrl_kwargs = partition( - lambda a: a.keyword.value not in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=aiplatformCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the aiplatform client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. 
- -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. -""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/scripts/fixup_definition_v1beta1_keywords.py b/owl-bot-staging/v1beta1/scripts/fixup_definition_v1beta1_keywords.py deleted file mode 100644 index 7fb60affb5..0000000000 --- a/owl-bot-staging/v1beta1/scripts/fixup_definition_v1beta1_keywords.py +++ /dev/null @@ -1,175 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class definitionCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. 
- return updated - - kwargs, ctrl_kwargs = partition( - lambda a: a.keyword.value not in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=definitionCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the definition client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. 
- -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. -""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/scripts/fixup_instance_v1beta1_keywords.py b/owl-bot-staging/v1beta1/scripts/fixup_instance_v1beta1_keywords.py deleted file mode 100644 index 52fe576948..0000000000 --- a/owl-bot-staging/v1beta1/scripts/fixup_instance_v1beta1_keywords.py +++ /dev/null @@ -1,175 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class instanceCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. 
- return updated - - kwargs, ctrl_kwargs = partition( - lambda a: a.keyword.value not in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=instanceCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the instance client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. 
- -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. -""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/scripts/fixup_params_v1beta1_keywords.py b/owl-bot-staging/v1beta1/scripts/fixup_params_v1beta1_keywords.py deleted file mode 100644 index 6331faf507..0000000000 --- a/owl-bot-staging/v1beta1/scripts/fixup_params_v1beta1_keywords.py +++ /dev/null @@ -1,175 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class paramsCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. 
- return updated - - kwargs, ctrl_kwargs = partition( - lambda a: a.keyword.value not in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=paramsCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the params client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. 
- -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. -""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/scripts/fixup_prediction_v1beta1_keywords.py b/owl-bot-staging/v1beta1/scripts/fixup_prediction_v1beta1_keywords.py deleted file mode 100644 index df2a884371..0000000000 --- a/owl-bot-staging/v1beta1/scripts/fixup_prediction_v1beta1_keywords.py +++ /dev/null @@ -1,175 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class predictionCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. 
- return updated - - kwargs, ctrl_kwargs = partition( - lambda a: a.keyword.value not in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=predictionCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the prediction client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. 
- -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. -""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/setup.py b/owl-bot-staging/v1beta1/setup.py deleted file mode 100644 index a5f61b66fb..0000000000 --- a/owl-bot-staging/v1beta1/setup.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import io -import os -import setuptools # type: ignore - -version = '0.1.0' - -package_root = os.path.abspath(os.path.dirname(__file__)) - -readme_filename = os.path.join(package_root, 'README.rst') -with io.open(readme_filename, encoding='utf-8') as readme_file: - readme = readme_file.read() - -setuptools.setup( - name='google-cloud-aiplatform-v1beta1-schema-trainingjob-definition', - version=version, - long_description=readme, - packages=setuptools.PEP420PackageFinder.find(), - namespace_packages=('google', 'google.cloud', 'google.cloud.aiplatform', 'google.cloud.aiplatform.v1beta1', 'google.cloud.aiplatform.v1beta1.schema', 'google.cloud.aiplatform.v1beta1.schema.trainingjob'), - platforms='Posix; MacOS X; Windows', - include_package_data=True, - install_requires=( - 'google-api-core[grpc] >= 1.28.0, < 3.0.0dev', - 'libcst >= 0.2.5', - 'proto-plus >= 1.19.7', - ), - python_requires='>=3.6', - classifiers=[ - 'Development Status :: 3 - Alpha', - 'Intended Audience :: Developers', - 'Operating System :: OS Independent', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Programming Language :: Python :: 3.9', - 'Topic :: Internet', - 'Topic :: Software Development :: Libraries :: Python Modules', - ], - zip_safe=False, -) diff --git a/owl-bot-staging/v1beta1/tests/__init__.py b/owl-bot-staging/v1beta1/tests/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/tests/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 
-*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/tests/unit/__init__.py b/owl-bot-staging/v1beta1/tests/unit/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py deleted file mode 100644 index 37f68a2b5a..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_dataset_service.py +++ /dev/null @@ -1,4030 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.dataset_service import DatasetServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.dataset_service import DatasetServiceClient -from google.cloud.aiplatform_v1beta1.services.dataset_service import pagers -from google.cloud.aiplatform_v1beta1.services.dataset_service import transports -from google.cloud.aiplatform_v1beta1.types import annotation -from google.cloud.aiplatform_v1beta1.types import annotation_spec -from google.cloud.aiplatform_v1beta1.types import data_item -from google.cloud.aiplatform_v1beta1.types import dataset -from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset -from google.cloud.aiplatform_v1beta1.types import dataset_service -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import io -from 
google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert DatasetServiceClient._get_default_mtls_endpoint(None) is None - assert DatasetServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert DatasetServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert DatasetServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert DatasetServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert DatasetServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - DatasetServiceClient, - DatasetServiceAsyncClient, -]) -def test_dataset_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = 
{"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.DatasetServiceGrpcTransport, "grpc"), - (transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_dataset_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - DatasetServiceClient, - DatasetServiceAsyncClient, -]) -def test_dataset_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_dataset_service_client_get_transport_class(): - transport = DatasetServiceClient.get_transport_class() - available_transports = [ - 
transports.DatasetServiceGrpcTransport, - ] - assert transport in available_transports - - transport = DatasetServiceClient.get_transport_class("grpc") - assert transport == transports.DatasetServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc"), - (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(DatasetServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceClient)) -@mock.patch.object(DatasetServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceAsyncClient)) -def test_dataset_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(DatasetServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(DatasetServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc", "true"), - (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc", "false"), - (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(DatasetServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceClient)) -@mock.patch.object(DatasetServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatasetServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_dataset_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc"), - (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_dataset_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (DatasetServiceClient, transports.DatasetServiceGrpcTransport, "grpc"), - (DatasetServiceAsyncClient, transports.DatasetServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_dataset_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_dataset_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.dataset_service.transports.DatasetServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = DatasetServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_create_dataset(transport: str = 'grpc', request_type=dataset_service.CreateDatasetRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_dataset(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.CreateDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_dataset_from_dict(): - test_create_dataset(request_type=dict) - - -def test_create_dataset_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - client.create_dataset() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.CreateDatasetRequest() - - -@pytest.mark.asyncio -async def test_create_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.CreateDatasetRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.CreateDatasetRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_dataset_async_from_dict(): - await test_create_dataset_async(request_type=dict) - - -def test_create_dataset_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.CreateDatasetRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_dataset_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.CreateDatasetRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_dataset_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_dataset( - parent='parent_value', - dataset=gca_dataset.Dataset(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].dataset - mock_val = gca_dataset.Dataset(name='name_value') - assert arg == mock_val - - -def test_create_dataset_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_dataset( - dataset_service.CreateDatasetRequest(), - parent='parent_value', - dataset=gca_dataset.Dataset(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_dataset_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_dataset( - parent='parent_value', - dataset=gca_dataset.Dataset(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].dataset - mock_val = gca_dataset.Dataset(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_dataset_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_dataset( - dataset_service.CreateDatasetRequest(), - parent='parent_value', - dataset=gca_dataset.Dataset(name='name_value'), - ) - - -def test_get_dataset(transport: str = 'grpc', request_type=dataset_service.GetDatasetRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = dataset.Dataset( - name='name_value', - display_name='display_name_value', - description='description_value', - metadata_schema_uri='metadata_schema_uri_value', - etag='etag_value', - ) - response = client.get_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.GetDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, dataset.Dataset) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.etag == 'etag_value' - - -def test_get_dataset_from_dict(): - test_get_dataset(request_type=dict) - - -def test_get_dataset_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - client.get_dataset() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.GetDatasetRequest() - - -@pytest.mark.asyncio -async def test_get_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.GetDatasetRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset( - name='name_value', - display_name='display_name_value', - description='description_value', - metadata_schema_uri='metadata_schema_uri_value', - etag='etag_value', - )) - response = await client.get_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.GetDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, dataset.Dataset) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_dataset_async_from_dict(): - await test_get_dataset_async(request_type=dict) - - -def test_get_dataset_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.GetDatasetRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - call.return_value = dataset.Dataset() - client.get_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_dataset_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.GetDatasetRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset()) - await client.get_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_dataset_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset.Dataset() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_dataset( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_dataset_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_dataset( - dataset_service.GetDatasetRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_dataset_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset.Dataset() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_dataset( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_dataset_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_dataset( - dataset_service.GetDatasetRequest(), - name='name_value', - ) - - -def test_update_dataset(transport: str = 'grpc', request_type=dataset_service.UpdateDatasetRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_dataset.Dataset( - name='name_value', - display_name='display_name_value', - description='description_value', - metadata_schema_uri='metadata_schema_uri_value', - etag='etag_value', - ) - response = client.update_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.UpdateDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_dataset.Dataset) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.etag == 'etag_value' - - -def test_update_dataset_from_dict(): - test_update_dataset(request_type=dict) - - -def test_update_dataset_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - client.update_dataset() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.UpdateDatasetRequest() - - -@pytest.mark.asyncio -async def test_update_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.UpdateDatasetRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset( - name='name_value', - display_name='display_name_value', - description='description_value', - metadata_schema_uri='metadata_schema_uri_value', - etag='etag_value', - )) - response = await client.update_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.UpdateDatasetRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_dataset.Dataset) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_update_dataset_async_from_dict(): - await test_update_dataset_async(request_type=dict) - - -def test_update_dataset_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.UpdateDatasetRequest() - - request.dataset.name = 'dataset.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - call.return_value = gca_dataset.Dataset() - client.update_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'dataset.name=dataset.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_dataset_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.UpdateDatasetRequest() - - request.dataset.name = 'dataset.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) - await client.update_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'dataset.name=dataset.name/value', - ) in kw['metadata'] - - -def test_update_dataset_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_dataset.Dataset() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_dataset( - dataset=gca_dataset.Dataset(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].dataset - mock_val = gca_dataset.Dataset(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_dataset_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_dataset( - dataset_service.UpdateDatasetRequest(), - dataset=gca_dataset.Dataset(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_dataset_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_dataset.Dataset() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_dataset( - dataset=gca_dataset.Dataset(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].dataset - mock_val = gca_dataset.Dataset(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_dataset_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.update_dataset( - dataset_service.UpdateDatasetRequest(), - dataset=gca_dataset.Dataset(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_list_datasets(transport: str = 'grpc', request_type=dataset_service.ListDatasetsRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListDatasetsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_datasets(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListDatasetsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListDatasetsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_datasets_from_dict(): - test_list_datasets(request_type=dict) - - -def test_list_datasets_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - client.list_datasets() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListDatasetsRequest() - - -@pytest.mark.asyncio -async def test_list_datasets_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ListDatasetsRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDatasetsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_datasets(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListDatasetsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListDatasetsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_datasets_async_from_dict(): - await test_list_datasets_async(request_type=dict) - - -def test_list_datasets_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ListDatasetsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - call.return_value = dataset_service.ListDatasetsResponse() - client.list_datasets(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_datasets_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ListDatasetsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDatasetsResponse()) - await client.list_datasets(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_datasets_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = dataset_service.ListDatasetsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_datasets( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_datasets_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_datasets( - dataset_service.ListDatasetsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_datasets_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListDatasetsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDatasetsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_datasets( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_datasets_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_datasets( - dataset_service.ListDatasetsRequest(), - parent='parent_value', - ) - - -def test_list_datasets_pager(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - dataset.Dataset(), - ], - next_page_token='abc', - ), - dataset_service.ListDatasetsResponse( - datasets=[], - next_page_token='def', - ), - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - ], - next_page_token='ghi', - ), - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_datasets(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, dataset.Dataset) - for i in results) - -def test_list_datasets_pages(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_datasets), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - dataset.Dataset(), - ], - next_page_token='abc', - ), - dataset_service.ListDatasetsResponse( - datasets=[], - next_page_token='def', - ), - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - ], - next_page_token='ghi', - ), - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - ], - ), - RuntimeError, - ) - pages = list(client.list_datasets(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_datasets_async_pager(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - dataset.Dataset(), - ], - next_page_token='abc', - ), - dataset_service.ListDatasetsResponse( - datasets=[], - next_page_token='def', - ), - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - ], - next_page_token='ghi', - ), - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_datasets(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, dataset.Dataset) - for i in responses) - -@pytest.mark.asyncio -async def test_list_datasets_async_pages(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_datasets), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - dataset.Dataset(), - ], - next_page_token='abc', - ), - dataset_service.ListDatasetsResponse( - datasets=[], - next_page_token='def', - ), - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - ], - next_page_token='ghi', - ), - dataset_service.ListDatasetsResponse( - datasets=[ - dataset.Dataset(), - dataset.Dataset(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_datasets(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_dataset(transport: str = 'grpc', request_type=dataset_service.DeleteDatasetRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.DeleteDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_dataset_from_dict(): - test_delete_dataset(request_type=dict) - - -def test_delete_dataset_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - client.delete_dataset() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.DeleteDatasetRequest() - - -@pytest.mark.asyncio -async def test_delete_dataset_async(transport: str = 'grpc_asyncio', request_type=dataset_service.DeleteDatasetRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.DeleteDatasetRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_dataset_async_from_dict(): - await test_delete_dataset_async(request_type=dict) - - -def test_delete_dataset_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = dataset_service.DeleteDatasetRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_dataset_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.DeleteDatasetRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_dataset(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_dataset_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_dataset( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_dataset_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_dataset( - dataset_service.DeleteDatasetRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_dataset_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_dataset), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_dataset( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_dataset_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_dataset( - dataset_service.DeleteDatasetRequest(), - name='name_value', - ) - - -def test_import_data(transport: str = 'grpc', request_type=dataset_service.ImportDataRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.import_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ImportDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_import_data_from_dict(): - test_import_data(request_type=dict) - - -def test_import_data_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - client.import_data() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ImportDataRequest() - - -@pytest.mark.asyncio -async def test_import_data_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ImportDataRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.import_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ImportDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_import_data_async_from_dict(): - await test_import_data_async(request_type=dict) - - -def test_import_data_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ImportDataRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.import_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_import_data_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ImportDataRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.import_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_import_data_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.import_data( - name='name_value', - import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].import_configs - mock_val = [dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))] - assert arg == mock_val - - -def test_import_data_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.import_data( - dataset_service.ImportDataRequest(), - name='name_value', - import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))], - ) - - -@pytest.mark.asyncio -async def test_import_data_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.import_data( - name='name_value', - import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].import_configs - mock_val = [dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))] - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_import_data_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.import_data( - dataset_service.ImportDataRequest(), - name='name_value', - import_configs=[dataset.ImportDataConfig(gcs_source=io.GcsSource(uris=['uris_value']))], - ) - - -def test_export_data(transport: str = 'grpc', request_type=dataset_service.ExportDataRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.export_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ExportDataRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_export_data_from_dict(): - test_export_data(request_type=dict) - - -def test_export_data_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - client.export_data() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ExportDataRequest() - - -@pytest.mark.asyncio -async def test_export_data_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ExportDataRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.export_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ExportDataRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_export_data_async_from_dict(): - await test_export_data_async(request_type=dict) - - -def test_export_data_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ExportDataRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.export_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_export_data_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ExportDataRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.export_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_export_data_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.export_data( - name='name_value', - export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].export_config - mock_val = dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) - assert arg == mock_val - - -def test_export_data_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.export_data( - dataset_service.ExportDataRequest(), - name='name_value', - export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - -@pytest.mark.asyncio -async def test_export_data_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.export_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.export_data( - name='name_value', - export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].export_config - mock_val = dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_export_data_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.export_data( - dataset_service.ExportDataRequest(), - name='name_value', - export_config=dataset.ExportDataConfig(gcs_destination=io.GcsDestination(output_uri_prefix='output_uri_prefix_value')), - ) - - -def test_list_data_items(transport: str = 'grpc', request_type=dataset_service.ListDataItemsRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListDataItemsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_data_items(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListDataItemsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListDataItemsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_data_items_from_dict(): - test_list_data_items(request_type=dict) - - -def test_list_data_items_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - client.list_data_items() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListDataItemsRequest() - - -@pytest.mark.asyncio -async def test_list_data_items_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ListDataItemsRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDataItemsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_data_items(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListDataItemsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListDataItemsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_data_items_async_from_dict(): - await test_list_data_items_async(request_type=dict) - - -def test_list_data_items_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ListDataItemsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - call.return_value = dataset_service.ListDataItemsResponse() - client.list_data_items(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_data_items_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ListDataItemsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDataItemsResponse()) - await client.list_data_items(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_data_items_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListDataItemsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_data_items( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_data_items_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_data_items( - dataset_service.ListDataItemsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_data_items_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListDataItemsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListDataItemsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_data_items( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_data_items_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.list_data_items( - dataset_service.ListDataItemsRequest(), - parent='parent_value', - ) - - -def test_list_data_items_pager(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - data_item.DataItem(), - ], - next_page_token='abc', - ), - dataset_service.ListDataItemsResponse( - data_items=[], - next_page_token='def', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - ], - next_page_token='ghi', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_data_items(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, data_item.DataItem) - for i in results) - -def test_list_data_items_pages(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - data_item.DataItem(), - ], - next_page_token='abc', - ), - dataset_service.ListDataItemsResponse( - data_items=[], - next_page_token='def', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - ], - next_page_token='ghi', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - ], - ), - RuntimeError, - ) - pages = list(client.list_data_items(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_data_items_async_pager(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - data_item.DataItem(), - ], - next_page_token='abc', - ), - dataset_service.ListDataItemsResponse( - data_items=[], - next_page_token='def', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - ], - next_page_token='ghi', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_data_items(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, data_item.DataItem) - for i in responses) - -@pytest.mark.asyncio -async def test_list_data_items_async_pages(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_items), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - data_item.DataItem(), - ], - next_page_token='abc', - ), - dataset_service.ListDataItemsResponse( - data_items=[], - next_page_token='def', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - ], - next_page_token='ghi', - ), - dataset_service.ListDataItemsResponse( - data_items=[ - data_item.DataItem(), - data_item.DataItem(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_data_items(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_get_annotation_spec(transport: str = 'grpc', request_type=dataset_service.GetAnnotationSpecRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = annotation_spec.AnnotationSpec( - name='name_value', - display_name='display_name_value', - etag='etag_value', - ) - response = client.get_annotation_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.GetAnnotationSpecRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, annotation_spec.AnnotationSpec) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.etag == 'etag_value' - - -def test_get_annotation_spec_from_dict(): - test_get_annotation_spec(request_type=dict) - - -def test_get_annotation_spec_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - client.get_annotation_spec() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.GetAnnotationSpecRequest() - - -@pytest.mark.asyncio -async def test_get_annotation_spec_async(transport: str = 'grpc_asyncio', request_type=dataset_service.GetAnnotationSpecRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec( - name='name_value', - display_name='display_name_value', - etag='etag_value', - )) - response = await client.get_annotation_spec(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.GetAnnotationSpecRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, annotation_spec.AnnotationSpec) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_annotation_spec_async_from_dict(): - await test_get_annotation_spec_async(request_type=dict) - - -def test_get_annotation_spec_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.GetAnnotationSpecRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - call.return_value = annotation_spec.AnnotationSpec() - client.get_annotation_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_annotation_spec_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.GetAnnotationSpecRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec()) - await client.get_annotation_spec(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_annotation_spec_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = annotation_spec.AnnotationSpec() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_annotation_spec( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_annotation_spec_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_annotation_spec( - dataset_service.GetAnnotationSpecRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_annotation_spec_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_annotation_spec), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = annotation_spec.AnnotationSpec() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(annotation_spec.AnnotationSpec()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_annotation_spec( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_annotation_spec_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_annotation_spec( - dataset_service.GetAnnotationSpecRequest(), - name='name_value', - ) - - -def test_list_annotations(transport: str = 'grpc', request_type=dataset_service.ListAnnotationsRequest): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListAnnotationsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_annotations(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListAnnotationsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListAnnotationsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_annotations_from_dict(): - test_list_annotations(request_type=dict) - - -def test_list_annotations_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - client.list_annotations() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListAnnotationsRequest() - - -@pytest.mark.asyncio -async def test_list_annotations_async(transport: str = 'grpc_asyncio', request_type=dataset_service.ListAnnotationsRequest): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListAnnotationsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_annotations(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == dataset_service.ListAnnotationsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListAnnotationsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_annotations_async_from_dict(): - await test_list_annotations_async(request_type=dict) - - -def test_list_annotations_field_headers(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ListAnnotationsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - call.return_value = dataset_service.ListAnnotationsResponse() - client.list_annotations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_annotations_field_headers_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = dataset_service.ListAnnotationsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListAnnotationsResponse()) - await client.list_annotations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_annotations_flattened(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListAnnotationsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_annotations( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_annotations_flattened_error(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_annotations( - dataset_service.ListAnnotationsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_annotations_flattened_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = dataset_service.ListAnnotationsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset_service.ListAnnotationsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_annotations( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_annotations_flattened_error_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_annotations( - dataset_service.ListAnnotationsRequest(), - parent='parent_value', - ) - - -def test_list_annotations_pager(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - annotation.Annotation(), - ], - next_page_token='abc', - ), - dataset_service.ListAnnotationsResponse( - annotations=[], - next_page_token='def', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - ], - next_page_token='ghi', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_annotations(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, annotation.Annotation) - for i in results) - -def test_list_annotations_pages(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - annotation.Annotation(), - ], - next_page_token='abc', - ), - dataset_service.ListAnnotationsResponse( - annotations=[], - next_page_token='def', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - ], - next_page_token='ghi', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - ], - ), - RuntimeError, - ) - pages = list(client.list_annotations(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_annotations_async_pager(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - annotation.Annotation(), - ], - next_page_token='abc', - ), - dataset_service.ListAnnotationsResponse( - annotations=[], - next_page_token='def', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - ], - next_page_token='ghi', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_annotations(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, annotation.Annotation) - for i in responses) - -@pytest.mark.asyncio -async def test_list_annotations_async_pages(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_annotations), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - annotation.Annotation(), - ], - next_page_token='abc', - ), - dataset_service.ListAnnotationsResponse( - annotations=[], - next_page_token='def', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - ], - next_page_token='ghi', - ), - dataset_service.ListAnnotationsResponse( - annotations=[ - annotation.Annotation(), - annotation.Annotation(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_annotations(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.DatasetServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.DatasetServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = DatasetServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.DatasetServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = DatasetServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. 
- transport = transports.DatasetServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = DatasetServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.DatasetServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.DatasetServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.DatasetServiceGrpcTransport, - transports.DatasetServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.DatasetServiceGrpcTransport, - ) - -def test_dataset_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.DatasetServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_dataset_service_base_transport(): - # Instantiate the base transport. 
- with mock.patch('google.cloud.aiplatform_v1beta1.services.dataset_service.transports.DatasetServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.DatasetServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'create_dataset', - 'get_dataset', - 'update_dataset', - 'list_datasets', - 'delete_dataset', - 'import_data', - 'export_data', - 'list_data_items', - 'get_annotation_spec', - 'list_annotations', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -def test_dataset_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.dataset_service.transports.DatasetServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.DatasetServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_dataset_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.dataset_service.transports.DatasetServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.DatasetServiceTransport() - adc.assert_called_once() - - -def test_dataset_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - DatasetServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.DatasetServiceGrpcTransport, - transports.DatasetServiceGrpcAsyncIOTransport, - ], -) -def test_dataset_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.DatasetServiceGrpcTransport, grpc_helpers), - (transports.DatasetServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_dataset_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.DatasetServiceGrpcTransport, transports.DatasetServiceGrpcAsyncIOTransport]) -def test_dataset_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. 
- with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_dataset_service_host_no_port(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_dataset_service_host_with_port(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_dataset_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.DatasetServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_dataset_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.DatasetServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.DatasetServiceGrpcTransport, transports.DatasetServiceGrpcAsyncIOTransport]) -def test_dataset_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from 
grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.DatasetServiceGrpcTransport, transports.DatasetServiceGrpcAsyncIOTransport]) -def test_dataset_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_dataset_service_grpc_lro_client(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_dataset_service_grpc_lro_async_client(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_annotation_path(): - project = "squid" - location = "clam" - dataset = "whelk" - data_item = "octopus" - annotation = "oyster" - expected = "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}/annotations/{annotation}".format(project=project, location=location, dataset=dataset, data_item=data_item, annotation=annotation, ) - actual = DatasetServiceClient.annotation_path(project, location, dataset, data_item, annotation) - assert expected == actual - - -def test_parse_annotation_path(): - expected = { - "project": "nudibranch", - "location": "cuttlefish", - "dataset": "mussel", - "data_item": "winkle", - "annotation": "nautilus", - } - path = DatasetServiceClient.annotation_path(**expected) - - # Check that the path construction is reversible. - actual = DatasetServiceClient.parse_annotation_path(path) - assert expected == actual - -def test_annotation_spec_path(): - project = "scallop" - location = "abalone" - dataset = "squid" - annotation_spec = "clam" - expected = "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(project=project, location=location, dataset=dataset, annotation_spec=annotation_spec, ) - actual = DatasetServiceClient.annotation_spec_path(project, location, dataset, annotation_spec) - assert expected == actual - - -def test_parse_annotation_spec_path(): - expected = { - "project": "whelk", - "location": "octopus", - "dataset": "oyster", - "annotation_spec": "nudibranch", - } - path = DatasetServiceClient.annotation_spec_path(**expected) - - # Check that the path construction is reversible. 
- actual = DatasetServiceClient.parse_annotation_spec_path(path) - assert expected == actual - -def test_data_item_path(): - project = "cuttlefish" - location = "mussel" - dataset = "winkle" - data_item = "nautilus" - expected = "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}".format(project=project, location=location, dataset=dataset, data_item=data_item, ) - actual = DatasetServiceClient.data_item_path(project, location, dataset, data_item) - assert expected == actual - - -def test_parse_data_item_path(): - expected = { - "project": "scallop", - "location": "abalone", - "dataset": "squid", - "data_item": "clam", - } - path = DatasetServiceClient.data_item_path(**expected) - - # Check that the path construction is reversible. - actual = DatasetServiceClient.parse_data_item_path(path) - assert expected == actual - -def test_dataset_path(): - project = "whelk" - location = "octopus" - dataset = "oyster" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - actual = DatasetServiceClient.dataset_path(project, location, dataset) - assert expected == actual - - -def test_parse_dataset_path(): - expected = { - "project": "nudibranch", - "location": "cuttlefish", - "dataset": "mussel", - } - path = DatasetServiceClient.dataset_path(**expected) - - # Check that the path construction is reversible. 
- actual = DatasetServiceClient.parse_dataset_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "winkle" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = DatasetServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "nautilus", - } - path = DatasetServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = DatasetServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "scallop" - expected = "folders/{folder}".format(folder=folder, ) - actual = DatasetServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "abalone", - } - path = DatasetServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = DatasetServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "squid" - expected = "organizations/{organization}".format(organization=organization, ) - actual = DatasetServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "clam", - } - path = DatasetServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. 
- actual = DatasetServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "whelk" - expected = "projects/{project}".format(project=project, ) - actual = DatasetServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "octopus", - } - path = DatasetServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = DatasetServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "oyster" - location = "nudibranch" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = DatasetServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "cuttlefish", - "location": "mussel", - } - path = DatasetServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = DatasetServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.DatasetServiceTransport, '_prep_wrapped_messages') as prep: - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.DatasetServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = DatasetServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = DatasetServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = DatasetServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py deleted file mode 100644 index 20b6a1d01d..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py +++ /dev/null @@ -1,2978 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.endpoint_service import EndpointServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.endpoint_service import EndpointServiceClient -from google.cloud.aiplatform_v1beta1.services.endpoint_service import pagers -from google.cloud.aiplatform_v1beta1.services.endpoint_service import transports -from google.cloud.aiplatform_v1beta1.types import accelerator_type -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import endpoint -from google.cloud.aiplatform_v1beta1.types import endpoint as gca_endpoint -from google.cloud.aiplatform_v1beta1.types import endpoint_service -from google.cloud.aiplatform_v1beta1.types import explanation -from google.cloud.aiplatform_v1beta1.types import explanation_metadata -from google.cloud.aiplatform_v1beta1.types import io -from google.cloud.aiplatform_v1beta1.types import machine_resources -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import 
google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert EndpointServiceClient._get_default_mtls_endpoint(None) is None - assert EndpointServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert EndpointServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert EndpointServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert EndpointServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert EndpointServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - EndpointServiceClient, - EndpointServiceAsyncClient, -]) -def test_endpoint_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - 
(transports.EndpointServiceGrpcTransport, "grpc"), - (transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_endpoint_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - EndpointServiceClient, - EndpointServiceAsyncClient, -]) -def test_endpoint_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_endpoint_service_client_get_transport_class(): - transport = EndpointServiceClient.get_transport_class() - available_transports = [ - transports.EndpointServiceGrpcTransport, - ] - assert transport in available_transports - - transport = EndpointServiceClient.get_transport_class("grpc") - assert transport == transports.EndpointServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - 
(EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"), - (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(EndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceClient)) -@mock.patch.object(EndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceAsyncClient)) -def test_endpoint_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(EndpointServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(EndpointServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc", "true"), - (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc", "false"), - (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(EndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceClient)) -@mock.patch.object(EndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(EndpointServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_endpoint_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"), - (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_endpoint_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"), - (EndpointServiceAsyncClient, transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_endpoint_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_endpoint_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.endpoint_service.transports.EndpointServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = EndpointServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_create_endpoint(transport: str = 'grpc', request_type=endpoint_service.CreateEndpointRequest): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_endpoint(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.CreateEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_endpoint_from_dict(): - test_create_endpoint(request_type=dict) - - -def test_create_endpoint_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_endpoint), - '__call__') as call: - client.create_endpoint() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.CreateEndpointRequest() - - -@pytest.mark.asyncio -async def test_create_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.CreateEndpointRequest): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_endpoint(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.CreateEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_endpoint_async_from_dict(): - await test_create_endpoint_async(request_type=dict) - - -def test_create_endpoint_field_headers(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.CreateEndpointRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_endpoint), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_endpoint_field_headers_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.CreateEndpointRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_endpoint), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_endpoint_flattened(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_endpoint( - parent='parent_value', - endpoint=gca_endpoint.Endpoint(name='name_value'), - endpoint_id='endpoint_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].endpoint - mock_val = gca_endpoint.Endpoint(name='name_value') - assert arg == mock_val - arg = args[0].endpoint_id - mock_val = 'endpoint_id_value' - assert arg == mock_val - - -def test_create_endpoint_flattened_error(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_endpoint( - endpoint_service.CreateEndpointRequest(), - parent='parent_value', - endpoint=gca_endpoint.Endpoint(name='name_value'), - endpoint_id='endpoint_id_value', - ) - - -@pytest.mark.asyncio -async def test_create_endpoint_flattened_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_endpoint( - parent='parent_value', - endpoint=gca_endpoint.Endpoint(name='name_value'), - endpoint_id='endpoint_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].endpoint - mock_val = gca_endpoint.Endpoint(name='name_value') - assert arg == mock_val - arg = args[0].endpoint_id - mock_val = 'endpoint_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_endpoint_flattened_error_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_endpoint( - endpoint_service.CreateEndpointRequest(), - parent='parent_value', - endpoint=gca_endpoint.Endpoint(name='name_value'), - endpoint_id='endpoint_id_value', - ) - - -def test_get_endpoint(transport: str = 'grpc', request_type=endpoint_service.GetEndpointRequest): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = endpoint.Endpoint( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - network='network_value', - enable_private_service_connect=True, - model_deployment_monitoring_job='model_deployment_monitoring_job_value', - ) - response = client.get_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.GetEndpointRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, endpoint.Endpoint) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.network == 'network_value' - assert response.enable_private_service_connect is True - assert response.model_deployment_monitoring_job == 'model_deployment_monitoring_job_value' - - -def test_get_endpoint_from_dict(): - test_get_endpoint(request_type=dict) - - -def test_get_endpoint_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_endpoint), - '__call__') as call: - client.get_endpoint() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.GetEndpointRequest() - - -@pytest.mark.asyncio -async def test_get_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.GetEndpointRequest): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - network='network_value', - enable_private_service_connect=True, - model_deployment_monitoring_job='model_deployment_monitoring_job_value', - )) - response = await client.get_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.GetEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, endpoint.Endpoint) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.network == 'network_value' - assert response.enable_private_service_connect is True - assert response.model_deployment_monitoring_job == 'model_deployment_monitoring_job_value' - - -@pytest.mark.asyncio -async def test_get_endpoint_async_from_dict(): - await test_get_endpoint_async(request_type=dict) - - -def test_get_endpoint_field_headers(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.GetEndpointRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_endpoint), - '__call__') as call: - call.return_value = endpoint.Endpoint() - client.get_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_endpoint_field_headers_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.GetEndpointRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_endpoint), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint()) - await client.get_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_endpoint_flattened(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = endpoint.Endpoint() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_endpoint( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_endpoint_flattened_error(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_endpoint( - endpoint_service.GetEndpointRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_endpoint_flattened_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = endpoint.Endpoint() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_endpoint( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_endpoint_flattened_error_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_endpoint( - endpoint_service.GetEndpointRequest(), - name='name_value', - ) - - -def test_list_endpoints(transport: str = 'grpc', request_type=endpoint_service.ListEndpointsRequest): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = endpoint_service.ListEndpointsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_endpoints(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.ListEndpointsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListEndpointsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_endpoints_from_dict(): - test_list_endpoints(request_type=dict) - - -def test_list_endpoints_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - client.list_endpoints() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.ListEndpointsRequest() - - -@pytest.mark.asyncio -async def test_list_endpoints_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.ListEndpointsRequest): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(endpoint_service.ListEndpointsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_endpoints(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.ListEndpointsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListEndpointsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_endpoints_async_from_dict(): - await test_list_endpoints_async(request_type=dict) - - -def test_list_endpoints_field_headers(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = endpoint_service.ListEndpointsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - call.return_value = endpoint_service.ListEndpointsResponse() - client.list_endpoints(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_endpoints_field_headers_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.ListEndpointsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint_service.ListEndpointsResponse()) - await client.list_endpoints(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_endpoints_flattened(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = endpoint_service.ListEndpointsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_endpoints( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_endpoints_flattened_error(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_endpoints( - endpoint_service.ListEndpointsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_endpoints_flattened_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = endpoint_service.ListEndpointsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint_service.ListEndpointsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_endpoints( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_endpoints_flattened_error_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_endpoints( - endpoint_service.ListEndpointsRequest(), - parent='parent_value', - ) - - -def test_list_endpoints_pager(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - endpoint_service.ListEndpointsResponse( - endpoints=[ - endpoint.Endpoint(), - endpoint.Endpoint(), - endpoint.Endpoint(), - ], - next_page_token='abc', - ), - endpoint_service.ListEndpointsResponse( - endpoints=[], - next_page_token='def', - ), - endpoint_service.ListEndpointsResponse( - endpoints=[ - endpoint.Endpoint(), - ], - next_page_token='ghi', - ), - endpoint_service.ListEndpointsResponse( - endpoints=[ - endpoint.Endpoint(), - endpoint.Endpoint(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_endpoints(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, endpoint.Endpoint) - for i in results) - -def test_list_endpoints_pages(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_endpoints), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - endpoint_service.ListEndpointsResponse( - endpoints=[ - endpoint.Endpoint(), - endpoint.Endpoint(), - endpoint.Endpoint(), - ], - next_page_token='abc', - ), - endpoint_service.ListEndpointsResponse( - endpoints=[], - next_page_token='def', - ), - endpoint_service.ListEndpointsResponse( - endpoints=[ - endpoint.Endpoint(), - ], - next_page_token='ghi', - ), - endpoint_service.ListEndpointsResponse( - endpoints=[ - endpoint.Endpoint(), - endpoint.Endpoint(), - ], - ), - RuntimeError, - ) - pages = list(client.list_endpoints(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_endpoints_async_pager(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_endpoints), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - endpoint_service.ListEndpointsResponse( - endpoints=[ - endpoint.Endpoint(), - endpoint.Endpoint(), - endpoint.Endpoint(), - ], - next_page_token='abc', - ), - endpoint_service.ListEndpointsResponse( - endpoints=[], - next_page_token='def', - ), - endpoint_service.ListEndpointsResponse( - endpoints=[ - endpoint.Endpoint(), - ], - next_page_token='ghi', - ), - endpoint_service.ListEndpointsResponse( - endpoints=[ - endpoint.Endpoint(), - endpoint.Endpoint(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_endpoints(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, endpoint.Endpoint) - for i in responses) - -@pytest.mark.asyncio -async def test_list_endpoints_async_pages(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_endpoints), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - endpoint_service.ListEndpointsResponse( - endpoints=[ - endpoint.Endpoint(), - endpoint.Endpoint(), - endpoint.Endpoint(), - ], - next_page_token='abc', - ), - endpoint_service.ListEndpointsResponse( - endpoints=[], - next_page_token='def', - ), - endpoint_service.ListEndpointsResponse( - endpoints=[ - endpoint.Endpoint(), - ], - next_page_token='ghi', - ), - endpoint_service.ListEndpointsResponse( - endpoints=[ - endpoint.Endpoint(), - endpoint.Endpoint(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_endpoints(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_endpoint(transport: str = 'grpc', request_type=endpoint_service.UpdateEndpointRequest): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_endpoint.Endpoint( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - network='network_value', - enable_private_service_connect=True, - model_deployment_monitoring_job='model_deployment_monitoring_job_value', - ) - response = client.update_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.UpdateEndpointRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_endpoint.Endpoint) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.network == 'network_value' - assert response.enable_private_service_connect is True - assert response.model_deployment_monitoring_job == 'model_deployment_monitoring_job_value' - - -def test_update_endpoint_from_dict(): - test_update_endpoint(request_type=dict) - - -def test_update_endpoint_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_endpoint), - '__call__') as call: - client.update_endpoint() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.UpdateEndpointRequest() - - -@pytest.mark.asyncio -async def test_update_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.UpdateEndpointRequest): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_endpoint.Endpoint( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - network='network_value', - enable_private_service_connect=True, - model_deployment_monitoring_job='model_deployment_monitoring_job_value', - )) - response = await client.update_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.UpdateEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_endpoint.Endpoint) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.network == 'network_value' - assert response.enable_private_service_connect is True - assert response.model_deployment_monitoring_job == 'model_deployment_monitoring_job_value' - - -@pytest.mark.asyncio -async def test_update_endpoint_async_from_dict(): - await test_update_endpoint_async(request_type=dict) - - -def test_update_endpoint_field_headers(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.UpdateEndpointRequest() - - request.endpoint.name = 'endpoint.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_endpoint), - '__call__') as call: - call.return_value = gca_endpoint.Endpoint() - client.update_endpoint(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint.name=endpoint.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_endpoint_field_headers_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.UpdateEndpointRequest() - - request.endpoint.name = 'endpoint.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_endpoint), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_endpoint.Endpoint()) - await client.update_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint.name=endpoint.name/value', - ) in kw['metadata'] - - -def test_update_endpoint_flattened(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_endpoint.Endpoint() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.update_endpoint( - endpoint=gca_endpoint.Endpoint(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].endpoint - mock_val = gca_endpoint.Endpoint(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_endpoint_flattened_error(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_endpoint( - endpoint_service.UpdateEndpointRequest(), - endpoint=gca_endpoint.Endpoint(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_endpoint_flattened_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_endpoint.Endpoint() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_endpoint.Endpoint()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_endpoint( - endpoint=gca_endpoint.Endpoint(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].endpoint - mock_val = gca_endpoint.Endpoint(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_endpoint_flattened_error_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_endpoint( - endpoint_service.UpdateEndpointRequest(), - endpoint=gca_endpoint.Endpoint(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_endpoint(transport: str = 'grpc', request_type=endpoint_service.DeleteEndpointRequest): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.DeleteEndpointRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_delete_endpoint_from_dict(): - test_delete_endpoint(request_type=dict) - - -def test_delete_endpoint_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_endpoint), - '__call__') as call: - client.delete_endpoint() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.DeleteEndpointRequest() - - -@pytest.mark.asyncio -async def test_delete_endpoint_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.DeleteEndpointRequest): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.DeleteEndpointRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_endpoint_async_from_dict(): - await test_delete_endpoint_async(request_type=dict) - - -def test_delete_endpoint_field_headers(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.DeleteEndpointRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_endpoint), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_endpoint_field_headers_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.DeleteEndpointRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_endpoint), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_endpoint(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_endpoint_flattened(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_endpoint( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_endpoint_flattened_error(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_endpoint( - endpoint_service.DeleteEndpointRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_endpoint_flattened_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_endpoint( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_endpoint_flattened_error_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_endpoint( - endpoint_service.DeleteEndpointRequest(), - name='name_value', - ) - - -def test_deploy_model(transport: str = 'grpc', request_type=endpoint_service.DeployModelRequest): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.deploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.DeployModelRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_deploy_model_from_dict(): - test_deploy_model(request_type=dict) - - -def test_deploy_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - client.deploy_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.DeployModelRequest() - - -@pytest.mark.asyncio -async def test_deploy_model_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.DeployModelRequest): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.deploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.DeployModelRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_deploy_model_async_from_dict(): - await test_deploy_model_async(request_type=dict) - - -def test_deploy_model_field_headers(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.DeployModelRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.deploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_deploy_model_field_headers_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.DeployModelRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.deploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -def test_deploy_model_flattened(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.deploy_model( - endpoint='endpoint_value', - deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))), - traffic_split={'key_value': 541}, - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].endpoint - mock_val = 'endpoint_value' - assert arg == mock_val - arg = args[0].deployed_model - mock_val = gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))) - assert arg == mock_val - arg = args[0].traffic_split - mock_val = {'key_value': 541} - assert arg == mock_val - - -def test_deploy_model_flattened_error(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.deploy_model( - endpoint_service.DeployModelRequest(), - endpoint='endpoint_value', - deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))), - traffic_split={'key_value': 541}, - ) - - -@pytest.mark.asyncio -async def test_deploy_model_flattened_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.deploy_model( - endpoint='endpoint_value', - deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))), - traffic_split={'key_value': 541}, - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].endpoint - mock_val = 'endpoint_value' - assert arg == mock_val - arg = args[0].deployed_model - mock_val = gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))) - assert arg == mock_val - arg = args[0].traffic_split - mock_val = {'key_value': 541} - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_deploy_model_flattened_error_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.deploy_model( - endpoint_service.DeployModelRequest(), - endpoint='endpoint_value', - deployed_model=gca_endpoint.DeployedModel(dedicated_resources=machine_resources.DedicatedResources(machine_spec=machine_resources.MachineSpec(machine_type='machine_type_value'))), - traffic_split={'key_value': 541}, - ) - - -def test_undeploy_model(transport: str = 'grpc', request_type=endpoint_service.UndeployModelRequest): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.undeploy_model(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.UndeployModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_undeploy_model_from_dict(): - test_undeploy_model(request_type=dict) - - -def test_undeploy_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - client.undeploy_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.UndeployModelRequest() - - -@pytest.mark.asyncio -async def test_undeploy_model_async(transport: str = 'grpc_asyncio', request_type=endpoint_service.UndeployModelRequest): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.undeploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == endpoint_service.UndeployModelRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_undeploy_model_async_from_dict(): - await test_undeploy_model_async(request_type=dict) - - -def test_undeploy_model_field_headers(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.UndeployModelRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.undeploy_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_undeploy_model_field_headers_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = endpoint_service.UndeployModelRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.undeploy_model(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -def test_undeploy_model_flattened(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.undeploy_model( - endpoint='endpoint_value', - deployed_model_id='deployed_model_id_value', - traffic_split={'key_value': 541}, - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].endpoint - mock_val = 'endpoint_value' - assert arg == mock_val - arg = args[0].deployed_model_id - mock_val = 'deployed_model_id_value' - assert arg == mock_val - arg = args[0].traffic_split - mock_val = {'key_value': 541} - assert arg == mock_val - - -def test_undeploy_model_flattened_error(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.undeploy_model( - endpoint_service.UndeployModelRequest(), - endpoint='endpoint_value', - deployed_model_id='deployed_model_id_value', - traffic_split={'key_value': 541}, - ) - - -@pytest.mark.asyncio -async def test_undeploy_model_flattened_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.undeploy_model( - endpoint='endpoint_value', - deployed_model_id='deployed_model_id_value', - traffic_split={'key_value': 541}, - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].endpoint - mock_val = 'endpoint_value' - assert arg == mock_val - arg = args[0].deployed_model_id - mock_val = 'deployed_model_id_value' - assert arg == mock_val - arg = args[0].traffic_split - mock_val = {'key_value': 541} - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_undeploy_model_flattened_error_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.undeploy_model( - endpoint_service.UndeployModelRequest(), - endpoint='endpoint_value', - deployed_model_id='deployed_model_id_value', - traffic_split={'key_value': 541}, - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.EndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.EndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = EndpointServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.EndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = EndpointServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.EndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = EndpointServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. 
- transport = transports.EndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.EndpointServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.EndpointServiceGrpcTransport, - transports.EndpointServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.EndpointServiceGrpcTransport, - ) - -def test_endpoint_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.EndpointServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_endpoint_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1beta1.services.endpoint_service.transports.EndpointServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.EndpointServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
- methods = ( - 'create_endpoint', - 'get_endpoint', - 'list_endpoints', - 'update_endpoint', - 'delete_endpoint', - 'deploy_model', - 'undeploy_model', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -def test_endpoint_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.EndpointServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_endpoint_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.EndpointServiceTransport() - adc.assert_called_once() - - -def test_endpoint_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - EndpointServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.EndpointServiceGrpcTransport, - transports.EndpointServiceGrpcAsyncIOTransport, - ], -) -def test_endpoint_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.EndpointServiceGrpcTransport, grpc_helpers), - (transports.EndpointServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_endpoint_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport]) -def test_endpoint_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. 
- with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_endpoint_service_host_no_port(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_endpoint_service_host_with_port(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_endpoint_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.EndpointServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_endpoint_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.EndpointServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport]) -def test_endpoint_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from 
grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.EndpointServiceGrpcTransport, transports.EndpointServiceGrpcAsyncIOTransport]) -def test_endpoint_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_endpoint_service_grpc_lro_client(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_endpoint_service_grpc_lro_async_client(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_endpoint_path(): - project = "squid" - location = "clam" - endpoint = "whelk" - expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - actual = EndpointServiceClient.endpoint_path(project, location, endpoint) - assert expected == actual - - -def test_parse_endpoint_path(): - expected = { - "project": "octopus", - "location": "oyster", - "endpoint": "nudibranch", - } - path = EndpointServiceClient.endpoint_path(**expected) - - # Check that the path construction is reversible. - actual = EndpointServiceClient.parse_endpoint_path(path) - assert expected == actual - -def test_model_path(): - project = "cuttlefish" - location = "mussel" - model = "winkle" - expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - actual = EndpointServiceClient.model_path(project, location, model) - assert expected == actual - - -def test_parse_model_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "model": "abalone", - } - path = EndpointServiceClient.model_path(**expected) - - # Check that the path construction is reversible. 
- actual = EndpointServiceClient.parse_model_path(path) - assert expected == actual - -def test_model_deployment_monitoring_job_path(): - project = "squid" - location = "clam" - model_deployment_monitoring_job = "whelk" - expected = "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format(project=project, location=location, model_deployment_monitoring_job=model_deployment_monitoring_job, ) - actual = EndpointServiceClient.model_deployment_monitoring_job_path(project, location, model_deployment_monitoring_job) - assert expected == actual - - -def test_parse_model_deployment_monitoring_job_path(): - expected = { - "project": "octopus", - "location": "oyster", - "model_deployment_monitoring_job": "nudibranch", - } - path = EndpointServiceClient.model_deployment_monitoring_job_path(**expected) - - # Check that the path construction is reversible. - actual = EndpointServiceClient.parse_model_deployment_monitoring_job_path(path) - assert expected == actual - -def test_network_path(): - project = "cuttlefish" - network = "mussel" - expected = "projects/{project}/global/networks/{network}".format(project=project, network=network, ) - actual = EndpointServiceClient.network_path(project, network) - assert expected == actual - - -def test_parse_network_path(): - expected = { - "project": "winkle", - "network": "nautilus", - } - path = EndpointServiceClient.network_path(**expected) - - # Check that the path construction is reversible. 
- actual = EndpointServiceClient.parse_network_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "scallop" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = EndpointServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "abalone", - } - path = EndpointServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = EndpointServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "squid" - expected = "folders/{folder}".format(folder=folder, ) - actual = EndpointServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "clam", - } - path = EndpointServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = EndpointServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "whelk" - expected = "organizations/{organization}".format(organization=organization, ) - actual = EndpointServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "octopus", - } - path = EndpointServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. 
- actual = EndpointServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "oyster" - expected = "projects/{project}".format(project=project, ) - actual = EndpointServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "nudibranch", - } - path = EndpointServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = EndpointServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "cuttlefish" - location = "mussel" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = EndpointServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "winkle", - "location": "nautilus", - } - path = EndpointServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = EndpointServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.EndpointServiceTransport, '_prep_wrapped_messages') as prep: - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.EndpointServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = EndpointServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = EndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = EndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py deleted file mode 100644 index 1f7b04d812..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_online_serving_service.py +++ /dev/null @@ -1,1382 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service import FeaturestoreOnlineServingServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service import FeaturestoreOnlineServingServiceClient -from google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service import transports -from google.cloud.aiplatform_v1beta1.types import feature_selector -from google.cloud.aiplatform_v1beta1.types import featurestore_online_service -from google.oauth2 import service_account -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(None) is None - assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert FeaturestoreOnlineServingServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - FeaturestoreOnlineServingServiceClient, - FeaturestoreOnlineServingServiceAsyncClient, -]) -def test_featurestore_online_serving_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"), - (transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def 
test_featurestore_online_serving_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - FeaturestoreOnlineServingServiceClient, - FeaturestoreOnlineServingServiceAsyncClient, -]) -def test_featurestore_online_serving_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_featurestore_online_serving_service_client_get_transport_class(): - transport = FeaturestoreOnlineServingServiceClient.get_transport_class() - available_transports = [ - transports.FeaturestoreOnlineServingServiceGrpcTransport, - ] - assert transport in available_transports - - transport = FeaturestoreOnlineServingServiceClient.get_transport_class("grpc") - assert transport == transports.FeaturestoreOnlineServingServiceGrpcTransport - - 
-@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"), - (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(FeaturestoreOnlineServingServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceClient)) -@mock.patch.object(FeaturestoreOnlineServingServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient)) -def test_featurestore_online_serving_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(FeaturestoreOnlineServingServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(FeaturestoreOnlineServingServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc", "true"), - (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc", "false"), - (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(FeaturestoreOnlineServingServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceClient)) -@mock.patch.object(FeaturestoreOnlineServingServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreOnlineServingServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_featurestore_online_serving_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. 
Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"), - (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_featurestore_online_serving_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. 
- options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (FeaturestoreOnlineServingServiceClient, transports.FeaturestoreOnlineServingServiceGrpcTransport, "grpc"), - (FeaturestoreOnlineServingServiceAsyncClient, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_featurestore_online_serving_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_featurestore_online_serving_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = FeaturestoreOnlineServingServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_read_feature_values(transport: str = 'grpc', request_type=featurestore_online_service.ReadFeatureValuesRequest): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = featurestore_online_service.ReadFeatureValuesResponse( - ) - response = client.read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_online_service.ReadFeatureValuesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, featurestore_online_service.ReadFeatureValuesResponse) - - -def test_read_feature_values_from_dict(): - test_read_feature_values(request_type=dict) - - -def test_read_feature_values_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_feature_values), - '__call__') as call: - client.read_feature_values() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_online_service.ReadFeatureValuesRequest() - - -@pytest.mark.asyncio -async def test_read_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_online_service.ReadFeatureValuesRequest): - client = FeaturestoreOnlineServingServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(featurestore_online_service.ReadFeatureValuesResponse( - )) - response = await client.read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_online_service.ReadFeatureValuesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, featurestore_online_service.ReadFeatureValuesResponse) - - -@pytest.mark.asyncio -async def test_read_feature_values_async_from_dict(): - await test_read_feature_values_async(request_type=dict) - - -def test_read_feature_values_field_headers(): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_online_service.ReadFeatureValuesRequest() - - request.entity_type = 'entity_type/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_feature_values), - '__call__') as call: - call.return_value = featurestore_online_service.ReadFeatureValuesResponse() - client.read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'entity_type=entity_type/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_read_feature_values_field_headers_async(): - client = FeaturestoreOnlineServingServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = featurestore_online_service.ReadFeatureValuesRequest() - - request.entity_type = 'entity_type/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_feature_values), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_online_service.ReadFeatureValuesResponse()) - await client.read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'entity_type=entity_type/value', - ) in kw['metadata'] - - -def test_read_feature_values_flattened(): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_online_service.ReadFeatureValuesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.read_feature_values( - entity_type='entity_type_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].entity_type - mock_val = 'entity_type_value' - assert arg == mock_val - - -def test_read_feature_values_flattened_error(): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.read_feature_values( - featurestore_online_service.ReadFeatureValuesRequest(), - entity_type='entity_type_value', - ) - - -@pytest.mark.asyncio -async def test_read_feature_values_flattened_async(): - client = FeaturestoreOnlineServingServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_online_service.ReadFeatureValuesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_online_service.ReadFeatureValuesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.read_feature_values( - entity_type='entity_type_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].entity_type - mock_val = 'entity_type_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_read_feature_values_flattened_error_async(): - client = FeaturestoreOnlineServingServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.read_feature_values( - featurestore_online_service.ReadFeatureValuesRequest(), - entity_type='entity_type_value', - ) - - -def test_streaming_read_feature_values(transport: str = 'grpc', request_type=featurestore_online_service.StreamingReadFeatureValuesRequest): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.streaming_read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([featurestore_online_service.ReadFeatureValuesResponse()]) - response = client.streaming_read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest() - - # Establish that the response is the type that we expect. - for message in response: - assert isinstance(message, featurestore_online_service.ReadFeatureValuesResponse) - - -def test_streaming_read_feature_values_from_dict(): - test_streaming_read_feature_values(request_type=dict) - - -def test_streaming_read_feature_values_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.streaming_read_feature_values), - '__call__') as call: - client.streaming_read_feature_values() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest() - - -@pytest.mark.asyncio -async def test_streaming_read_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_online_service.StreamingReadFeatureValuesRequest): - client = FeaturestoreOnlineServingServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.streaming_read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock(side_effect=[featurestore_online_service.ReadFeatureValuesResponse()]) - response = await client.streaming_read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_online_service.StreamingReadFeatureValuesRequest() - - # Establish that the response is the type that we expect. 
- message = await response.read() - assert isinstance(message, featurestore_online_service.ReadFeatureValuesResponse) - - -@pytest.mark.asyncio -async def test_streaming_read_feature_values_async_from_dict(): - await test_streaming_read_feature_values_async(request_type=dict) - - -def test_streaming_read_feature_values_field_headers(): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_online_service.StreamingReadFeatureValuesRequest() - - request.entity_type = 'entity_type/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.streaming_read_feature_values), - '__call__') as call: - call.return_value = iter([featurestore_online_service.ReadFeatureValuesResponse()]) - client.streaming_read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'entity_type=entity_type/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_streaming_read_feature_values_field_headers_async(): - client = FeaturestoreOnlineServingServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_online_service.StreamingReadFeatureValuesRequest() - - request.entity_type = 'entity_type/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.streaming_read_feature_values), - '__call__') as call: - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock(side_effect=[featurestore_online_service.ReadFeatureValuesResponse()]) - await client.streaming_read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'entity_type=entity_type/value', - ) in kw['metadata'] - - -def test_streaming_read_feature_values_flattened(): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.streaming_read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([featurestore_online_service.ReadFeatureValuesResponse()]) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.streaming_read_feature_values( - entity_type='entity_type_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].entity_type - mock_val = 'entity_type_value' - assert arg == mock_val - - -def test_streaming_read_feature_values_flattened_error(): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.streaming_read_feature_values( - featurestore_online_service.StreamingReadFeatureValuesRequest(), - entity_type='entity_type_value', - ) - - -@pytest.mark.asyncio -async def test_streaming_read_feature_values_flattened_async(): - client = FeaturestoreOnlineServingServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.streaming_read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([featurestore_online_service.ReadFeatureValuesResponse()]) - - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.streaming_read_feature_values( - entity_type='entity_type_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].entity_type - mock_val = 'entity_type_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_streaming_read_feature_values_flattened_error_async(): - client = FeaturestoreOnlineServingServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.streaming_read_feature_values( - featurestore_online_service.StreamingReadFeatureValuesRequest(), - entity_type='entity_type_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. 
- transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = FeaturestoreOnlineServingServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = FeaturestoreOnlineServingServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = FeaturestoreOnlineServingServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. 
- transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.FeaturestoreOnlineServingServiceGrpcTransport, - transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.FeaturestoreOnlineServingServiceGrpcTransport, - ) - -def test_featurestore_online_serving_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.FeaturestoreOnlineServingServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_featurestore_online_serving_service_base_transport(): - # Instantiate the base transport. 
- with mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.FeaturestoreOnlineServingServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'read_feature_values', - 'streaming_read_feature_values', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - -def test_featurestore_online_serving_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.FeaturestoreOnlineServingServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_featurestore_online_serving_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_online_serving_service.transports.FeaturestoreOnlineServingServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.FeaturestoreOnlineServingServiceTransport() - adc.assert_called_once() - - -def test_featurestore_online_serving_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - FeaturestoreOnlineServingServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.FeaturestoreOnlineServingServiceGrpcTransport, - transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, - ], -) -def test_featurestore_online_serving_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.FeaturestoreOnlineServingServiceGrpcTransport, grpc_helpers), - (transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_featurestore_online_serving_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.FeaturestoreOnlineServingServiceGrpcTransport, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport]) -def test_featurestore_online_serving_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
- with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_featurestore_online_serving_service_host_no_port(): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_featurestore_online_serving_service_host_with_port(): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_featurestore_online_serving_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.FeaturestoreOnlineServingServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_featurestore_online_serving_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.FeaturestoreOnlineServingServiceGrpcTransport, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport]) -def test_featurestore_online_serving_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", 
private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.FeaturestoreOnlineServingServiceGrpcTransport, transports.FeaturestoreOnlineServingServiceGrpcAsyncIOTransport]) -def test_featurestore_online_serving_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_entity_type_path(): - project = "squid" - location = "clam" - featurestore = "whelk" - entity_type = "octopus" - expected = 
"projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, ) - actual = FeaturestoreOnlineServingServiceClient.entity_type_path(project, location, featurestore, entity_type) - assert expected == actual - - -def test_parse_entity_type_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - "featurestore": "cuttlefish", - "entity_type": "mussel", - } - path = FeaturestoreOnlineServingServiceClient.entity_type_path(**expected) - - # Check that the path construction is reversible. - actual = FeaturestoreOnlineServingServiceClient.parse_entity_type_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "winkle" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = FeaturestoreOnlineServingServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "nautilus", - } - path = FeaturestoreOnlineServingServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = FeaturestoreOnlineServingServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "scallop" - expected = "folders/{folder}".format(folder=folder, ) - actual = FeaturestoreOnlineServingServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "abalone", - } - path = FeaturestoreOnlineServingServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. 
- actual = FeaturestoreOnlineServingServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "squid" - expected = "organizations/{organization}".format(organization=organization, ) - actual = FeaturestoreOnlineServingServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "clam", - } - path = FeaturestoreOnlineServingServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = FeaturestoreOnlineServingServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "whelk" - expected = "projects/{project}".format(project=project, ) - actual = FeaturestoreOnlineServingServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "octopus", - } - path = FeaturestoreOnlineServingServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = FeaturestoreOnlineServingServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "oyster" - location = "nudibranch" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = FeaturestoreOnlineServingServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "cuttlefish", - "location": "mussel", - } - path = FeaturestoreOnlineServingServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = FeaturestoreOnlineServingServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.FeaturestoreOnlineServingServiceTransport, '_prep_wrapped_messages') as prep: - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.FeaturestoreOnlineServingServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = FeaturestoreOnlineServingServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = FeaturestoreOnlineServingServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = FeaturestoreOnlineServingServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py deleted file mode 100644 index 3569bdc62f..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py +++ /dev/null @@ -1,6641 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.featurestore_service import FeaturestoreServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.featurestore_service import FeaturestoreServiceClient -from google.cloud.aiplatform_v1beta1.services.featurestore_service import pagers -from google.cloud.aiplatform_v1beta1.services.featurestore_service import transports -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import entity_type -from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type -from google.cloud.aiplatform_v1beta1.types import feature -from google.cloud.aiplatform_v1beta1.types import feature as gca_feature -from google.cloud.aiplatform_v1beta1.types import feature_monitoring_stats -from google.cloud.aiplatform_v1beta1.types import feature_selector -from google.cloud.aiplatform_v1beta1.types import featurestore -from google.cloud.aiplatform_v1beta1.types import featurestore as gca_featurestore -from google.cloud.aiplatform_v1beta1.types import featurestore_monitoring -from google.cloud.aiplatform_v1beta1.types import featurestore_service -from google.cloud.aiplatform_v1beta1.types import io -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from 
google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert FeaturestoreServiceClient._get_default_mtls_endpoint(None) is None - assert FeaturestoreServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert FeaturestoreServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert FeaturestoreServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert FeaturestoreServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert FeaturestoreServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - FeaturestoreServiceClient, - FeaturestoreServiceAsyncClient, -]) -def test_featurestore_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = 
client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.FeaturestoreServiceGrpcTransport, "grpc"), - (transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_featurestore_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - FeaturestoreServiceClient, - FeaturestoreServiceAsyncClient, -]) -def test_featurestore_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_featurestore_service_client_get_transport_class(): - transport = FeaturestoreServiceClient.get_transport_class() - available_transports = [ - 
transports.FeaturestoreServiceGrpcTransport, - ] - assert transport in available_transports - - transport = FeaturestoreServiceClient.get_transport_class("grpc") - assert transport == transports.FeaturestoreServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc"), - (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(FeaturestoreServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceClient)) -@mock.patch.object(FeaturestoreServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceAsyncClient)) -def test_featurestore_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(FeaturestoreServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(FeaturestoreServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. 
- options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc", "true"), - (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc", "false"), - (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(FeaturestoreServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceClient)) -@mock.patch.object(FeaturestoreServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FeaturestoreServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_featurestore_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. 
Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc"), - (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_featurestore_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (FeaturestoreServiceClient, transports.FeaturestoreServiceGrpcTransport, "grpc"), - (FeaturestoreServiceAsyncClient, transports.FeaturestoreServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_featurestore_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_featurestore_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_service.transports.FeaturestoreServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = FeaturestoreServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_create_featurestore(transport: str = 'grpc', request_type=featurestore_service.CreateFeaturestoreRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_featurestore(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.CreateFeaturestoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_featurestore_from_dict(): - test_create_featurestore(request_type=dict) - - -def test_create_featurestore_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_featurestore), - '__call__') as call: - client.create_featurestore() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.CreateFeaturestoreRequest() - - -@pytest.mark.asyncio -async def test_create_featurestore_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.CreateFeaturestoreRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_featurestore(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.CreateFeaturestoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_featurestore_async_from_dict(): - await test_create_featurestore_async(request_type=dict) - - -def test_create_featurestore_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.CreateFeaturestoreRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_featurestore), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_featurestore_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.CreateFeaturestoreRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_featurestore), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_featurestore_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_featurestore( - parent='parent_value', - featurestore=gca_featurestore.Featurestore(name='name_value'), - featurestore_id='featurestore_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].featurestore - mock_val = gca_featurestore.Featurestore(name='name_value') - assert arg == mock_val - arg = args[0].featurestore_id - mock_val = 'featurestore_id_value' - assert arg == mock_val - - -def test_create_featurestore_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_featurestore( - featurestore_service.CreateFeaturestoreRequest(), - parent='parent_value', - featurestore=gca_featurestore.Featurestore(name='name_value'), - featurestore_id='featurestore_id_value', - ) - - -@pytest.mark.asyncio -async def test_create_featurestore_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_featurestore( - parent='parent_value', - featurestore=gca_featurestore.Featurestore(name='name_value'), - featurestore_id='featurestore_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].featurestore - mock_val = gca_featurestore.Featurestore(name='name_value') - assert arg == mock_val - arg = args[0].featurestore_id - mock_val = 'featurestore_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_featurestore_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_featurestore( - featurestore_service.CreateFeaturestoreRequest(), - parent='parent_value', - featurestore=gca_featurestore.Featurestore(name='name_value'), - featurestore_id='featurestore_id_value', - ) - - -def test_get_featurestore(transport: str = 'grpc', request_type=featurestore_service.GetFeaturestoreRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore.Featurestore( - name='name_value', - etag='etag_value', - state=featurestore.Featurestore.State.STABLE, - ) - response = client.get_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.GetFeaturestoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, featurestore.Featurestore) - assert response.name == 'name_value' - assert response.etag == 'etag_value' - assert response.state == featurestore.Featurestore.State.STABLE - - -def test_get_featurestore_from_dict(): - test_get_featurestore(request_type=dict) - - -def test_get_featurestore_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_featurestore), - '__call__') as call: - client.get_featurestore() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.GetFeaturestoreRequest() - - -@pytest.mark.asyncio -async def test_get_featurestore_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.GetFeaturestoreRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(featurestore.Featurestore( - name='name_value', - etag='etag_value', - state=featurestore.Featurestore.State.STABLE, - )) - response = await client.get_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.GetFeaturestoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, featurestore.Featurestore) - assert response.name == 'name_value' - assert response.etag == 'etag_value' - assert response.state == featurestore.Featurestore.State.STABLE - - -@pytest.mark.asyncio -async def test_get_featurestore_async_from_dict(): - await test_get_featurestore_async(request_type=dict) - - -def test_get_featurestore_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. 
Set these to a non-empty value. - request = featurestore_service.GetFeaturestoreRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_featurestore), - '__call__') as call: - call.return_value = featurestore.Featurestore() - client.get_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_featurestore_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.GetFeaturestoreRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_featurestore), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore.Featurestore()) - await client.get_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_featurestore_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore.Featurestore() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_featurestore( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_featurestore_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_featurestore( - featurestore_service.GetFeaturestoreRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_featurestore_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore.Featurestore() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore.Featurestore()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_featurestore( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_featurestore_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_featurestore( - featurestore_service.GetFeaturestoreRequest(), - name='name_value', - ) - - -def test_list_featurestores(transport: str = 'grpc', request_type=featurestore_service.ListFeaturestoresRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.ListFeaturestoresResponse( - next_page_token='next_page_token_value', - ) - response = client.list_featurestores(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ListFeaturestoresRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListFeaturestoresPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_featurestores_from_dict(): - test_list_featurestores(request_type=dict) - - -def test_list_featurestores_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__') as call: - client.list_featurestores() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ListFeaturestoresRequest() - - -@pytest.mark.asyncio -async def test_list_featurestores_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ListFeaturestoresRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturestoresResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_featurestores(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ListFeaturestoresRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListFeaturestoresAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_featurestores_async_from_dict(): - await test_list_featurestores_async(request_type=dict) - - -def test_list_featurestores_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.ListFeaturestoresRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__') as call: - call.return_value = featurestore_service.ListFeaturestoresResponse() - client.list_featurestores(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_featurestores_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.ListFeaturestoresRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_featurestores), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturestoresResponse()) - await client.list_featurestores(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_featurestores_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.ListFeaturestoresResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_featurestores( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_featurestores_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_featurestores( - featurestore_service.ListFeaturestoresRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_featurestores_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.ListFeaturestoresResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturestoresResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_featurestores( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_featurestores_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_featurestores( - featurestore_service.ListFeaturestoresRequest(), - parent='parent_value', - ) - - -def test_list_featurestores_pager(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - featurestore.Featurestore(), - featurestore.Featurestore(), - ], - next_page_token='abc', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[], - next_page_token='def', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - ], - next_page_token='ghi', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - featurestore.Featurestore(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_featurestores(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, featurestore.Featurestore) - for i in results) - -def test_list_featurestores_pages(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - featurestore.Featurestore(), - featurestore.Featurestore(), - ], - next_page_token='abc', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[], - next_page_token='def', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - ], - next_page_token='ghi', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - featurestore.Featurestore(), - ], - ), - RuntimeError, - ) - pages = list(client.list_featurestores(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_featurestores_async_pager(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - featurestore.Featurestore(), - featurestore.Featurestore(), - ], - next_page_token='abc', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[], - next_page_token='def', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - ], - next_page_token='ghi', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - featurestore.Featurestore(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_featurestores(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, featurestore.Featurestore) - for i in responses) - -@pytest.mark.asyncio -async def test_list_featurestores_async_pages(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_featurestores), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - featurestore.Featurestore(), - featurestore.Featurestore(), - ], - next_page_token='abc', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[], - next_page_token='def', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - ], - next_page_token='ghi', - ), - featurestore_service.ListFeaturestoresResponse( - featurestores=[ - featurestore.Featurestore(), - featurestore.Featurestore(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_featurestores(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_featurestore(transport: str = 'grpc', request_type=featurestore_service.UpdateFeaturestoreRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.update_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.UpdateFeaturestoreRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_update_featurestore_from_dict(): - test_update_featurestore(request_type=dict) - - -def test_update_featurestore_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_featurestore), - '__call__') as call: - client.update_featurestore() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.UpdateFeaturestoreRequest() - - -@pytest.mark.asyncio -async def test_update_featurestore_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.UpdateFeaturestoreRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.update_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.UpdateFeaturestoreRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_update_featurestore_async_from_dict(): - await test_update_featurestore_async(request_type=dict) - - -def test_update_featurestore_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.UpdateFeaturestoreRequest() - - request.featurestore.name = 'featurestore.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_featurestore), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.update_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'featurestore.name=featurestore.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_featurestore_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.UpdateFeaturestoreRequest() - - request.featurestore.name = 'featurestore.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_featurestore), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.update_featurestore(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'featurestore.name=featurestore.name/value', - ) in kw['metadata'] - - -def test_update_featurestore_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_featurestore( - featurestore=gca_featurestore.Featurestore(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].featurestore - mock_val = gca_featurestore.Featurestore(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_featurestore_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_featurestore( - featurestore_service.UpdateFeaturestoreRequest(), - featurestore=gca_featurestore.Featurestore(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_featurestore_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_featurestore( - featurestore=gca_featurestore.Featurestore(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].featurestore - mock_val = gca_featurestore.Featurestore(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_featurestore_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.update_featurestore( - featurestore_service.UpdateFeaturestoreRequest(), - featurestore=gca_featurestore.Featurestore(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_featurestore(transport: str = 'grpc', request_type=featurestore_service.DeleteFeaturestoreRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.DeleteFeaturestoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_featurestore_from_dict(): - test_delete_featurestore(request_type=dict) - - -def test_delete_featurestore_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_featurestore), - '__call__') as call: - client.delete_featurestore() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.DeleteFeaturestoreRequest() - - -@pytest.mark.asyncio -async def test_delete_featurestore_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.DeleteFeaturestoreRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.DeleteFeaturestoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_featurestore_async_from_dict(): - await test_delete_featurestore_async(request_type=dict) - - -def test_delete_featurestore_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.DeleteFeaturestoreRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_featurestore), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_featurestore_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.DeleteFeaturestoreRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_featurestore), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_featurestore(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_featurestore_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_featurestore( - name='name_value', - force=True, - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].force - mock_val = True - assert arg == mock_val - - -def test_delete_featurestore_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_featurestore( - featurestore_service.DeleteFeaturestoreRequest(), - name='name_value', - force=True, - ) - - -@pytest.mark.asyncio -async def test_delete_featurestore_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_featurestore), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_featurestore( - name='name_value', - force=True, - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].force - mock_val = True - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_featurestore_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_featurestore( - featurestore_service.DeleteFeaturestoreRequest(), - name='name_value', - force=True, - ) - - -def test_create_entity_type(transport: str = 'grpc', request_type=featurestore_service.CreateEntityTypeRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.CreateEntityTypeRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_entity_type_from_dict(): - test_create_entity_type(request_type=dict) - - -def test_create_entity_type_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_entity_type), - '__call__') as call: - client.create_entity_type() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.CreateEntityTypeRequest() - - -@pytest.mark.asyncio -async def test_create_entity_type_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.CreateEntityTypeRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.CreateEntityTypeRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_entity_type_async_from_dict(): - await test_create_entity_type_async(request_type=dict) - - -def test_create_entity_type_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = featurestore_service.CreateEntityTypeRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_entity_type), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_entity_type_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.CreateEntityTypeRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_entity_type), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_entity_type_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_entity_type( - parent='parent_value', - entity_type=gca_entity_type.EntityType(name='name_value'), - entity_type_id='entity_type_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].entity_type - mock_val = gca_entity_type.EntityType(name='name_value') - assert arg == mock_val - arg = args[0].entity_type_id - mock_val = 'entity_type_id_value' - assert arg == mock_val - - -def test_create_entity_type_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_entity_type( - featurestore_service.CreateEntityTypeRequest(), - parent='parent_value', - entity_type=gca_entity_type.EntityType(name='name_value'), - entity_type_id='entity_type_id_value', - ) - - -@pytest.mark.asyncio -async def test_create_entity_type_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_entity_type( - parent='parent_value', - entity_type=gca_entity_type.EntityType(name='name_value'), - entity_type_id='entity_type_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].entity_type - mock_val = gca_entity_type.EntityType(name='name_value') - assert arg == mock_val - arg = args[0].entity_type_id - mock_val = 'entity_type_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_entity_type_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_entity_type( - featurestore_service.CreateEntityTypeRequest(), - parent='parent_value', - entity_type=gca_entity_type.EntityType(name='name_value'), - entity_type_id='entity_type_id_value', - ) - - -def test_get_entity_type(transport: str = 'grpc', request_type=featurestore_service.GetEntityTypeRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = entity_type.EntityType( - name='name_value', - description='description_value', - etag='etag_value', - ) - response = client.get_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.GetEntityTypeRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, entity_type.EntityType) - assert response.name == 'name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - - -def test_get_entity_type_from_dict(): - test_get_entity_type(request_type=dict) - - -def test_get_entity_type_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_entity_type), - '__call__') as call: - client.get_entity_type() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.GetEntityTypeRequest() - - -@pytest.mark.asyncio -async def test_get_entity_type_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.GetEntityTypeRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(entity_type.EntityType( - name='name_value', - description='description_value', - etag='etag_value', - )) - response = await client.get_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.GetEntityTypeRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, entity_type.EntityType) - assert response.name == 'name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_entity_type_async_from_dict(): - await test_get_entity_type_async(request_type=dict) - - -def test_get_entity_type_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.GetEntityTypeRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_entity_type), - '__call__') as call: - call.return_value = entity_type.EntityType() - client.get_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_entity_type_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.GetEntityTypeRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_entity_type), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(entity_type.EntityType()) - await client.get_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_entity_type_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = entity_type.EntityType() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_entity_type( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_entity_type_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_entity_type( - featurestore_service.GetEntityTypeRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_entity_type_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = entity_type.EntityType() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(entity_type.EntityType()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_entity_type( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_entity_type_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_entity_type( - featurestore_service.GetEntityTypeRequest(), - name='name_value', - ) - - -def test_list_entity_types(transport: str = 'grpc', request_type=featurestore_service.ListEntityTypesRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_entity_types), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.ListEntityTypesResponse( - next_page_token='next_page_token_value', - ) - response = client.list_entity_types(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ListEntityTypesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListEntityTypesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_entity_types_from_dict(): - test_list_entity_types(request_type=dict) - - -def test_list_entity_types_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_entity_types), - '__call__') as call: - client.list_entity_types() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ListEntityTypesRequest() - - -@pytest.mark.asyncio -async def test_list_entity_types_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ListEntityTypesRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_entity_types), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListEntityTypesResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_entity_types(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ListEntityTypesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListEntityTypesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_entity_types_async_from_dict(): - await test_list_entity_types_async(request_type=dict) - - -def test_list_entity_types_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = featurestore_service.ListEntityTypesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_entity_types), - '__call__') as call: - call.return_value = featurestore_service.ListEntityTypesResponse() - client.list_entity_types(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_entity_types_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.ListEntityTypesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_entity_types), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListEntityTypesResponse()) - await client.list_entity_types(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_entity_types_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_entity_types), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.ListEntityTypesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_entity_types( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_entity_types_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_entity_types( - featurestore_service.ListEntityTypesRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_entity_types_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_entity_types), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.ListEntityTypesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListEntityTypesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_entity_types( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_entity_types_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_entity_types( - featurestore_service.ListEntityTypesRequest(), - parent='parent_value', - ) - - -def test_list_entity_types_pager(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_entity_types), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - featurestore_service.ListEntityTypesResponse( - entity_types=[ - entity_type.EntityType(), - entity_type.EntityType(), - entity_type.EntityType(), - ], - next_page_token='abc', - ), - featurestore_service.ListEntityTypesResponse( - entity_types=[], - next_page_token='def', - ), - featurestore_service.ListEntityTypesResponse( - entity_types=[ - entity_type.EntityType(), - ], - next_page_token='ghi', - ), - featurestore_service.ListEntityTypesResponse( - entity_types=[ - entity_type.EntityType(), - entity_type.EntityType(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_entity_types(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, entity_type.EntityType) - for i in results) - -def test_list_entity_types_pages(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call 
within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_entity_types), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - featurestore_service.ListEntityTypesResponse( - entity_types=[ - entity_type.EntityType(), - entity_type.EntityType(), - entity_type.EntityType(), - ], - next_page_token='abc', - ), - featurestore_service.ListEntityTypesResponse( - entity_types=[], - next_page_token='def', - ), - featurestore_service.ListEntityTypesResponse( - entity_types=[ - entity_type.EntityType(), - ], - next_page_token='ghi', - ), - featurestore_service.ListEntityTypesResponse( - entity_types=[ - entity_type.EntityType(), - entity_type.EntityType(), - ], - ), - RuntimeError, - ) - pages = list(client.list_entity_types(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_entity_types_async_pager(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_entity_types), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.ListEntityTypesResponse( - entity_types=[ - entity_type.EntityType(), - entity_type.EntityType(), - entity_type.EntityType(), - ], - next_page_token='abc', - ), - featurestore_service.ListEntityTypesResponse( - entity_types=[], - next_page_token='def', - ), - featurestore_service.ListEntityTypesResponse( - entity_types=[ - entity_type.EntityType(), - ], - next_page_token='ghi', - ), - featurestore_service.ListEntityTypesResponse( - entity_types=[ - entity_type.EntityType(), - entity_type.EntityType(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_entity_types(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, entity_type.EntityType) - for i in responses) - -@pytest.mark.asyncio -async def test_list_entity_types_async_pages(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_entity_types), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.ListEntityTypesResponse( - entity_types=[ - entity_type.EntityType(), - entity_type.EntityType(), - entity_type.EntityType(), - ], - next_page_token='abc', - ), - featurestore_service.ListEntityTypesResponse( - entity_types=[], - next_page_token='def', - ), - featurestore_service.ListEntityTypesResponse( - entity_types=[ - entity_type.EntityType(), - ], - next_page_token='ghi', - ), - featurestore_service.ListEntityTypesResponse( - entity_types=[ - entity_type.EntityType(), - entity_type.EntityType(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_entity_types(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_entity_type(transport: str = 'grpc', request_type=featurestore_service.UpdateEntityTypeRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_entity_type.EntityType( - name='name_value', - description='description_value', - etag='etag_value', - ) - response = client.update_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.UpdateEntityTypeRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_entity_type.EntityType) - assert response.name == 'name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - - -def test_update_entity_type_from_dict(): - test_update_entity_type(request_type=dict) - - -def test_update_entity_type_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_entity_type), - '__call__') as call: - client.update_entity_type() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.UpdateEntityTypeRequest() - - -@pytest.mark.asyncio -async def test_update_entity_type_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.UpdateEntityTypeRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_entity_type.EntityType( - name='name_value', - description='description_value', - etag='etag_value', - )) - response = await client.update_entity_type(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.UpdateEntityTypeRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_entity_type.EntityType) - assert response.name == 'name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_update_entity_type_async_from_dict(): - await test_update_entity_type_async(request_type=dict) - - -def test_update_entity_type_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.UpdateEntityTypeRequest() - - request.entity_type.name = 'entity_type.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_entity_type), - '__call__') as call: - call.return_value = gca_entity_type.EntityType() - client.update_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'entity_type.name=entity_type.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_entity_type_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.UpdateEntityTypeRequest() - - request.entity_type.name = 'entity_type.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_entity_type), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_entity_type.EntityType()) - await client.update_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'entity_type.name=entity_type.name/value', - ) in kw['metadata'] - - -def test_update_entity_type_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_entity_type.EntityType() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_entity_type( - entity_type=gca_entity_type.EntityType(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].entity_type - mock_val = gca_entity_type.EntityType(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_entity_type_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_entity_type( - featurestore_service.UpdateEntityTypeRequest(), - entity_type=gca_entity_type.EntityType(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_entity_type_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_entity_type.EntityType() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_entity_type.EntityType()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_entity_type( - entity_type=gca_entity_type.EntityType(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].entity_type - mock_val = gca_entity_type.EntityType(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_entity_type_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.update_entity_type( - featurestore_service.UpdateEntityTypeRequest(), - entity_type=gca_entity_type.EntityType(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_entity_type(transport: str = 'grpc', request_type=featurestore_service.DeleteEntityTypeRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.DeleteEntityTypeRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_entity_type_from_dict(): - test_delete_entity_type(request_type=dict) - - -def test_delete_entity_type_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_entity_type), - '__call__') as call: - client.delete_entity_type() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.DeleteEntityTypeRequest() - - -@pytest.mark.asyncio -async def test_delete_entity_type_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.DeleteEntityTypeRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.DeleteEntityTypeRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_entity_type_async_from_dict(): - await test_delete_entity_type_async(request_type=dict) - - -def test_delete_entity_type_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.DeleteEntityTypeRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_entity_type), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_entity_type_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.DeleteEntityTypeRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_entity_type), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_entity_type(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_entity_type_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_entity_type( - name='name_value', - force=True, - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].force - mock_val = True - assert arg == mock_val - - -def test_delete_entity_type_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_entity_type( - featurestore_service.DeleteEntityTypeRequest(), - name='name_value', - force=True, - ) - - -@pytest.mark.asyncio -async def test_delete_entity_type_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_entity_type), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_entity_type( - name='name_value', - force=True, - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].force - mock_val = True - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_entity_type_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_entity_type( - featurestore_service.DeleteEntityTypeRequest(), - name='name_value', - force=True, - ) - - -def test_create_feature(transport: str = 'grpc', request_type=featurestore_service.CreateFeatureRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.CreateFeatureRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_feature_from_dict(): - test_create_feature(request_type=dict) - - -def test_create_feature_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_feature), - '__call__') as call: - client.create_feature() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.CreateFeatureRequest() - - -@pytest.mark.asyncio -async def test_create_feature_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.CreateFeatureRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.CreateFeatureRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_feature_async_from_dict(): - await test_create_feature_async(request_type=dict) - - -def test_create_feature_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = featurestore_service.CreateFeatureRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_feature), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_feature_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.CreateFeatureRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_feature), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_feature_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_feature( - parent='parent_value', - feature=gca_feature.Feature(name='name_value'), - feature_id='feature_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].feature - mock_val = gca_feature.Feature(name='name_value') - assert arg == mock_val - arg = args[0].feature_id - mock_val = 'feature_id_value' - assert arg == mock_val - - -def test_create_feature_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_feature( - featurestore_service.CreateFeatureRequest(), - parent='parent_value', - feature=gca_feature.Feature(name='name_value'), - feature_id='feature_id_value', - ) - - -@pytest.mark.asyncio -async def test_create_feature_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_feature), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_feature( - parent='parent_value', - feature=gca_feature.Feature(name='name_value'), - feature_id='feature_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].feature - mock_val = gca_feature.Feature(name='name_value') - assert arg == mock_val - arg = args[0].feature_id - mock_val = 'feature_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_feature_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_feature( - featurestore_service.CreateFeatureRequest(), - parent='parent_value', - feature=gca_feature.Feature(name='name_value'), - feature_id='feature_id_value', - ) - - -def test_batch_create_features(transport: str = 'grpc', request_type=featurestore_service.BatchCreateFeaturesRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.batch_create_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.batch_create_features(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.BatchCreateFeaturesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_batch_create_features_from_dict(): - test_batch_create_features(request_type=dict) - - -def test_batch_create_features_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_features), - '__call__') as call: - client.batch_create_features() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.BatchCreateFeaturesRequest() - - -@pytest.mark.asyncio -async def test_batch_create_features_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.BatchCreateFeaturesRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.batch_create_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.batch_create_features(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.BatchCreateFeaturesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_batch_create_features_async_from_dict(): - await test_batch_create_features_async(request_type=dict) - - -def test_batch_create_features_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.BatchCreateFeaturesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_features), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.batch_create_features(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_batch_create_features_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.BatchCreateFeaturesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_features), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.batch_create_features(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_batch_create_features_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.batch_create_features( - parent='parent_value', - requests=[featurestore_service.CreateFeatureRequest(parent='parent_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].requests - mock_val = [featurestore_service.CreateFeatureRequest(parent='parent_value')] - assert arg == mock_val - - -def test_batch_create_features_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.batch_create_features( - featurestore_service.BatchCreateFeaturesRequest(), - parent='parent_value', - requests=[featurestore_service.CreateFeatureRequest(parent='parent_value')], - ) - - -@pytest.mark.asyncio -async def test_batch_create_features_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.batch_create_features( - parent='parent_value', - requests=[featurestore_service.CreateFeatureRequest(parent='parent_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].requests - mock_val = [featurestore_service.CreateFeatureRequest(parent='parent_value')] - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_batch_create_features_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.batch_create_features( - featurestore_service.BatchCreateFeaturesRequest(), - parent='parent_value', - requests=[featurestore_service.CreateFeatureRequest(parent='parent_value')], - ) - - -def test_get_feature(transport: str = 'grpc', request_type=featurestore_service.GetFeatureRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = feature.Feature( - name='name_value', - description='description_value', - value_type=feature.Feature.ValueType.BOOL, - etag='etag_value', - ) - response = client.get_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.GetFeatureRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, feature.Feature) - assert response.name == 'name_value' - assert response.description == 'description_value' - assert response.value_type == feature.Feature.ValueType.BOOL - assert response.etag == 'etag_value' - - -def test_get_feature_from_dict(): - test_get_feature(request_type=dict) - - -def test_get_feature_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_feature), - '__call__') as call: - client.get_feature() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.GetFeatureRequest() - - -@pytest.mark.asyncio -async def test_get_feature_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.GetFeatureRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(feature.Feature( - name='name_value', - description='description_value', - value_type=feature.Feature.ValueType.BOOL, - etag='etag_value', - )) - response = await client.get_feature(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.GetFeatureRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, feature.Feature) - assert response.name == 'name_value' - assert response.description == 'description_value' - assert response.value_type == feature.Feature.ValueType.BOOL - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_feature_async_from_dict(): - await test_get_feature_async(request_type=dict) - - -def test_get_feature_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.GetFeatureRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_feature), - '__call__') as call: - call.return_value = feature.Feature() - client.get_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_feature_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.GetFeatureRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_feature), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(feature.Feature()) - await client.get_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_feature_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = feature.Feature() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_feature( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_feature_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_feature( - featurestore_service.GetFeatureRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_feature_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = feature.Feature() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(feature.Feature()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_feature( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_feature_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_feature( - featurestore_service.GetFeatureRequest(), - name='name_value', - ) - - -def test_list_features(transport: str = 'grpc', request_type=featurestore_service.ListFeaturesRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.ListFeaturesResponse( - next_page_token='next_page_token_value', - ) - response = client.list_features(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ListFeaturesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListFeaturesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_features_from_dict(): - test_list_features(request_type=dict) - - -def test_list_features_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_features), - '__call__') as call: - client.list_features() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ListFeaturesRequest() - - -@pytest.mark.asyncio -async def test_list_features_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ListFeaturesRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturesResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_features(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ListFeaturesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListFeaturesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_features_async_from_dict(): - await test_list_features_async(request_type=dict) - - -def test_list_features_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.ListFeaturesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_features), - '__call__') as call: - call.return_value = featurestore_service.ListFeaturesResponse() - client.list_features(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_features_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.ListFeaturesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_features), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturesResponse()) - await client.list_features(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_features_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.ListFeaturesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_features( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_features_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_features( - featurestore_service.ListFeaturesRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_features_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.ListFeaturesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.ListFeaturesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_features( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_features_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_features( - featurestore_service.ListFeaturesRequest(), - parent='parent_value', - ) - - -def test_list_features_pager(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_features), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.ListFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - feature.Feature(), - ], - next_page_token='abc', - ), - featurestore_service.ListFeaturesResponse( - features=[], - next_page_token='def', - ), - featurestore_service.ListFeaturesResponse( - features=[ - feature.Feature(), - ], - next_page_token='ghi', - ), - featurestore_service.ListFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_features(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, feature.Feature) - for i in results) - -def test_list_features_pages(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_features), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.ListFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - feature.Feature(), - ], - next_page_token='abc', - ), - featurestore_service.ListFeaturesResponse( - features=[], - next_page_token='def', - ), - featurestore_service.ListFeaturesResponse( - features=[ - feature.Feature(), - ], - next_page_token='ghi', - ), - featurestore_service.ListFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - ], - ), - RuntimeError, - ) - pages = list(client.list_features(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_features_async_pager(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_features), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.ListFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - feature.Feature(), - ], - next_page_token='abc', - ), - featurestore_service.ListFeaturesResponse( - features=[], - next_page_token='def', - ), - featurestore_service.ListFeaturesResponse( - features=[ - feature.Feature(), - ], - next_page_token='ghi', - ), - featurestore_service.ListFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_features(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, feature.Feature) - for i in responses) - -@pytest.mark.asyncio -async def test_list_features_async_pages(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_features), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.ListFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - feature.Feature(), - ], - next_page_token='abc', - ), - featurestore_service.ListFeaturesResponse( - features=[], - next_page_token='def', - ), - featurestore_service.ListFeaturesResponse( - features=[ - feature.Feature(), - ], - next_page_token='ghi', - ), - featurestore_service.ListFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_features(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_feature(transport: str = 'grpc', request_type=featurestore_service.UpdateFeatureRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_feature.Feature( - name='name_value', - description='description_value', - value_type=gca_feature.Feature.ValueType.BOOL, - etag='etag_value', - ) - response = client.update_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.UpdateFeatureRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_feature.Feature) - assert response.name == 'name_value' - assert response.description == 'description_value' - assert response.value_type == gca_feature.Feature.ValueType.BOOL - assert response.etag == 'etag_value' - - -def test_update_feature_from_dict(): - test_update_feature(request_type=dict) - - -def test_update_feature_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_feature), - '__call__') as call: - client.update_feature() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.UpdateFeatureRequest() - - -@pytest.mark.asyncio -async def test_update_feature_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.UpdateFeatureRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_feature.Feature( - name='name_value', - description='description_value', - value_type=gca_feature.Feature.ValueType.BOOL, - etag='etag_value', - )) - response = await client.update_feature(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.UpdateFeatureRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_feature.Feature) - assert response.name == 'name_value' - assert response.description == 'description_value' - assert response.value_type == gca_feature.Feature.ValueType.BOOL - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_update_feature_async_from_dict(): - await test_update_feature_async(request_type=dict) - - -def test_update_feature_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.UpdateFeatureRequest() - - request.feature.name = 'feature.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_feature), - '__call__') as call: - call.return_value = gca_feature.Feature() - client.update_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'feature.name=feature.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_feature_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.UpdateFeatureRequest() - - request.feature.name = 'feature.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_feature), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_feature.Feature()) - await client.update_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'feature.name=feature.name/value', - ) in kw['metadata'] - - -def test_update_feature_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_feature.Feature() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_feature( - feature=gca_feature.Feature(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].feature - mock_val = gca_feature.Feature(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_feature_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_feature( - featurestore_service.UpdateFeatureRequest(), - feature=gca_feature.Feature(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_feature_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_feature.Feature() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_feature.Feature()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_feature( - feature=gca_feature.Feature(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].feature - mock_val = gca_feature.Feature(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_feature_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.update_feature( - featurestore_service.UpdateFeatureRequest(), - feature=gca_feature.Feature(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_feature(transport: str = 'grpc', request_type=featurestore_service.DeleteFeatureRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.DeleteFeatureRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_feature_from_dict(): - test_delete_feature(request_type=dict) - - -def test_delete_feature_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_feature), - '__call__') as call: - client.delete_feature() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.DeleteFeatureRequest() - - -@pytest.mark.asyncio -async def test_delete_feature_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.DeleteFeatureRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.DeleteFeatureRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_feature_async_from_dict(): - await test_delete_feature_async(request_type=dict) - - -def test_delete_feature_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.DeleteFeatureRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_feature), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_feature_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.DeleteFeatureRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_feature), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_feature(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_feature_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_feature), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_feature( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_feature_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_feature( - featurestore_service.DeleteFeatureRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_feature_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_feature), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_feature( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_feature_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_feature( - featurestore_service.DeleteFeatureRequest(), - name='name_value', - ) - - -def test_import_feature_values(transport: str = 'grpc', request_type=featurestore_service.ImportFeatureValuesRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.import_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ImportFeatureValuesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_import_feature_values_from_dict(): - test_import_feature_values(request_type=dict) - - -def test_import_feature_values_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_feature_values), - '__call__') as call: - client.import_feature_values() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ImportFeatureValuesRequest() - - -@pytest.mark.asyncio -async def test_import_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ImportFeatureValuesRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.import_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ImportFeatureValuesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_import_feature_values_async_from_dict(): - await test_import_feature_values_async(request_type=dict) - - -def test_import_feature_values_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. 
Set these to a non-empty value. - request = featurestore_service.ImportFeatureValuesRequest() - - request.entity_type = 'entity_type/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_feature_values), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.import_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'entity_type=entity_type/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_import_feature_values_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.ImportFeatureValuesRequest() - - request.entity_type = 'entity_type/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_feature_values), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.import_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'entity_type=entity_type/value', - ) in kw['metadata'] - - -def test_import_feature_values_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.import_feature_values( - entity_type='entity_type_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].entity_type - mock_val = 'entity_type_value' - assert arg == mock_val - - -def test_import_feature_values_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.import_feature_values( - featurestore_service.ImportFeatureValuesRequest(), - entity_type='entity_type_value', - ) - - -@pytest.mark.asyncio -async def test_import_feature_values_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.import_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.import_feature_values( - entity_type='entity_type_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].entity_type - mock_val = 'entity_type_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_import_feature_values_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.import_feature_values( - featurestore_service.ImportFeatureValuesRequest(), - entity_type='entity_type_value', - ) - - -def test_batch_read_feature_values(transport: str = 'grpc', request_type=featurestore_service.BatchReadFeatureValuesRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.batch_read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.BatchReadFeatureValuesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_batch_read_feature_values_from_dict(): - test_batch_read_feature_values(request_type=dict) - - -def test_batch_read_feature_values_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_feature_values), - '__call__') as call: - client.batch_read_feature_values() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.BatchReadFeatureValuesRequest() - - -@pytest.mark.asyncio -async def test_batch_read_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.BatchReadFeatureValuesRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.batch_read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.BatchReadFeatureValuesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_batch_read_feature_values_async_from_dict(): - await test_batch_read_feature_values_async(request_type=dict) - - -def test_batch_read_feature_values_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.BatchReadFeatureValuesRequest() - - request.featurestore = 'featurestore/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_feature_values), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.batch_read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'featurestore=featurestore/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_batch_read_feature_values_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.BatchReadFeatureValuesRequest() - - request.featurestore = 'featurestore/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.batch_read_feature_values), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.batch_read_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'featurestore=featurestore/value', - ) in kw['metadata'] - - -def test_batch_read_feature_values_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.batch_read_feature_values( - featurestore='featurestore_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].featurestore - mock_val = 'featurestore_value' - assert arg == mock_val - - -def test_batch_read_feature_values_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.batch_read_feature_values( - featurestore_service.BatchReadFeatureValuesRequest(), - featurestore='featurestore_value', - ) - - -@pytest.mark.asyncio -async def test_batch_read_feature_values_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.batch_read_feature_values( - featurestore='featurestore_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].featurestore - mock_val = 'featurestore_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_batch_read_feature_values_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.batch_read_feature_values( - featurestore_service.BatchReadFeatureValuesRequest(), - featurestore='featurestore_value', - ) - - -def test_export_feature_values(transport: str = 'grpc', request_type=featurestore_service.ExportFeatureValuesRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.export_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ExportFeatureValuesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_export_feature_values_from_dict(): - test_export_feature_values(request_type=dict) - - -def test_export_feature_values_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.export_feature_values), - '__call__') as call: - client.export_feature_values() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ExportFeatureValuesRequest() - - -@pytest.mark.asyncio -async def test_export_feature_values_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.ExportFeatureValuesRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.export_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.ExportFeatureValuesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_export_feature_values_async_from_dict(): - await test_export_feature_values_async(request_type=dict) - - -def test_export_feature_values_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = featurestore_service.ExportFeatureValuesRequest() - - request.entity_type = 'entity_type/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_feature_values), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.export_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'entity_type=entity_type/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_export_feature_values_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.ExportFeatureValuesRequest() - - request.entity_type = 'entity_type/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_feature_values), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.export_feature_values(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'entity_type=entity_type/value', - ) in kw['metadata'] - - -def test_export_feature_values_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.export_feature_values( - entity_type='entity_type_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].entity_type - mock_val = 'entity_type_value' - assert arg == mock_val - - -def test_export_feature_values_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.export_feature_values( - featurestore_service.ExportFeatureValuesRequest(), - entity_type='entity_type_value', - ) - - -@pytest.mark.asyncio -async def test_export_feature_values_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_feature_values), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.export_feature_values( - entity_type='entity_type_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].entity_type - mock_val = 'entity_type_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_export_feature_values_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.export_feature_values( - featurestore_service.ExportFeatureValuesRequest(), - entity_type='entity_type_value', - ) - - -def test_search_features(transport: str = 'grpc', request_type=featurestore_service.SearchFeaturesRequest): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.SearchFeaturesResponse( - next_page_token='next_page_token_value', - ) - response = client.search_features(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.SearchFeaturesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.SearchFeaturesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_search_features_from_dict(): - test_search_features(request_type=dict) - - -def test_search_features_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_features), - '__call__') as call: - client.search_features() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.SearchFeaturesRequest() - - -@pytest.mark.asyncio -async def test_search_features_async(transport: str = 'grpc_asyncio', request_type=featurestore_service.SearchFeaturesRequest): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.SearchFeaturesResponse( - next_page_token='next_page_token_value', - )) - response = await client.search_features(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == featurestore_service.SearchFeaturesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.SearchFeaturesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_search_features_async_from_dict(): - await test_search_features_async(request_type=dict) - - -def test_search_features_field_headers(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.SearchFeaturesRequest() - - request.location = 'location/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_features), - '__call__') as call: - call.return_value = featurestore_service.SearchFeaturesResponse() - client.search_features(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'location=location/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_search_features_field_headers_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = featurestore_service.SearchFeaturesRequest() - - request.location = 'location/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.search_features), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.SearchFeaturesResponse()) - await client.search_features(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'location=location/value', - ) in kw['metadata'] - - -def test_search_features_flattened(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.SearchFeaturesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.search_features( - location='location_value', - query='query_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].location - mock_val = 'location_value' - assert arg == mock_val - arg = args[0].query - mock_val = 'query_value' - assert arg == mock_val - - -def test_search_features_flattened_error(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.search_features( - featurestore_service.SearchFeaturesRequest(), - location='location_value', - query='query_value', - ) - - -@pytest.mark.asyncio -async def test_search_features_flattened_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_features), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = featurestore_service.SearchFeaturesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(featurestore_service.SearchFeaturesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.search_features( - location='location_value', - query='query_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].location - mock_val = 'location_value' - assert arg == mock_val - arg = args[0].query - mock_val = 'query_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_search_features_flattened_error_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.search_features( - featurestore_service.SearchFeaturesRequest(), - location='location_value', - query='query_value', - ) - - -def test_search_features_pager(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.search_features), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - feature.Feature(), - ], - next_page_token='abc', - ), - featurestore_service.SearchFeaturesResponse( - features=[], - next_page_token='def', - ), - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - ], - next_page_token='ghi', - ), - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('location', ''), - )), - ) - pager = client.search_features(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, feature.Feature) - for i in results) - -def test_search_features_pages(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_features), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - feature.Feature(), - ], - next_page_token='abc', - ), - featurestore_service.SearchFeaturesResponse( - features=[], - next_page_token='def', - ), - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - ], - next_page_token='ghi', - ), - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - ], - ), - RuntimeError, - ) - pages = list(client.search_features(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_search_features_async_pager(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_features), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - feature.Feature(), - ], - next_page_token='abc', - ), - featurestore_service.SearchFeaturesResponse( - features=[], - next_page_token='def', - ), - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - ], - next_page_token='ghi', - ), - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - ], - ), - RuntimeError, - ) - async_pager = await client.search_features(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, feature.Feature) - for i in responses) - -@pytest.mark.asyncio -async def test_search_features_async_pages(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_features), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - feature.Feature(), - ], - next_page_token='abc', - ), - featurestore_service.SearchFeaturesResponse( - features=[], - next_page_token='def', - ), - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - ], - next_page_token='ghi', - ), - featurestore_service.SearchFeaturesResponse( - features=[ - feature.Feature(), - feature.Feature(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.search_features(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.FeaturestoreServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.FeaturestoreServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = FeaturestoreServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.FeaturestoreServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = FeaturestoreServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. 
- transport = transports.FeaturestoreServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = FeaturestoreServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.FeaturestoreServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.FeaturestoreServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.FeaturestoreServiceGrpcTransport, - transports.FeaturestoreServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.FeaturestoreServiceGrpcTransport, - ) - -def test_featurestore_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.FeaturestoreServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_featurestore_service_base_transport(): - # Instantiate the base transport. 
- with mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_service.transports.FeaturestoreServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.FeaturestoreServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'create_featurestore', - 'get_featurestore', - 'list_featurestores', - 'update_featurestore', - 'delete_featurestore', - 'create_entity_type', - 'get_entity_type', - 'list_entity_types', - 'update_entity_type', - 'delete_entity_type', - 'create_feature', - 'batch_create_features', - 'get_feature', - 'list_features', - 'update_feature', - 'delete_feature', - 'import_feature_values', - 'batch_read_feature_values', - 'export_feature_values', - 'search_features', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -def test_featurestore_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_service.transports.FeaturestoreServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.FeaturestoreServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", 
- ) - - -def test_featurestore_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.featurestore_service.transports.FeaturestoreServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.FeaturestoreServiceTransport() - adc.assert_called_once() - - -def test_featurestore_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - FeaturestoreServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.FeaturestoreServiceGrpcTransport, - transports.FeaturestoreServiceGrpcAsyncIOTransport, - ], -) -def test_featurestore_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.FeaturestoreServiceGrpcTransport, grpc_helpers), - (transports.FeaturestoreServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_featurestore_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.FeaturestoreServiceGrpcTransport, transports.FeaturestoreServiceGrpcAsyncIOTransport]) -def test_featurestore_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
- with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_featurestore_service_host_no_port(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_featurestore_service_host_with_port(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_featurestore_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.FeaturestoreServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_featurestore_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.FeaturestoreServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.FeaturestoreServiceGrpcTransport, transports.FeaturestoreServiceGrpcAsyncIOTransport]) -def test_featurestore_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - 
"mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.FeaturestoreServiceGrpcTransport, transports.FeaturestoreServiceGrpcAsyncIOTransport]) -def test_featurestore_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_featurestore_service_grpc_lro_client(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_featurestore_service_grpc_lro_async_client(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_entity_type_path(): - project = "squid" - location = "clam" - featurestore = "whelk" - entity_type = "octopus" - expected = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, ) - actual = FeaturestoreServiceClient.entity_type_path(project, location, featurestore, entity_type) - assert expected == actual - - -def test_parse_entity_type_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - "featurestore": "cuttlefish", - "entity_type": "mussel", - } - path = FeaturestoreServiceClient.entity_type_path(**expected) - - # Check that the path construction is reversible. 
- actual = FeaturestoreServiceClient.parse_entity_type_path(path) - assert expected == actual - -def test_feature_path(): - project = "winkle" - location = "nautilus" - featurestore = "scallop" - entity_type = "abalone" - feature = "squid" - expected = "projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}".format(project=project, location=location, featurestore=featurestore, entity_type=entity_type, feature=feature, ) - actual = FeaturestoreServiceClient.feature_path(project, location, featurestore, entity_type, feature) - assert expected == actual - - -def test_parse_feature_path(): - expected = { - "project": "clam", - "location": "whelk", - "featurestore": "octopus", - "entity_type": "oyster", - "feature": "nudibranch", - } - path = FeaturestoreServiceClient.feature_path(**expected) - - # Check that the path construction is reversible. - actual = FeaturestoreServiceClient.parse_feature_path(path) - assert expected == actual - -def test_featurestore_path(): - project = "cuttlefish" - location = "mussel" - featurestore = "winkle" - expected = "projects/{project}/locations/{location}/featurestores/{featurestore}".format(project=project, location=location, featurestore=featurestore, ) - actual = FeaturestoreServiceClient.featurestore_path(project, location, featurestore) - assert expected == actual - - -def test_parse_featurestore_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "featurestore": "abalone", - } - path = FeaturestoreServiceClient.featurestore_path(**expected) - - # Check that the path construction is reversible. 
- actual = FeaturestoreServiceClient.parse_featurestore_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "squid" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = FeaturestoreServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "clam", - } - path = FeaturestoreServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = FeaturestoreServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "whelk" - expected = "folders/{folder}".format(folder=folder, ) - actual = FeaturestoreServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "octopus", - } - path = FeaturestoreServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = FeaturestoreServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "oyster" - expected = "organizations/{organization}".format(organization=organization, ) - actual = FeaturestoreServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nudibranch", - } - path = FeaturestoreServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. 
- actual = FeaturestoreServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "cuttlefish" - expected = "projects/{project}".format(project=project, ) - actual = FeaturestoreServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "mussel", - } - path = FeaturestoreServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = FeaturestoreServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "winkle" - location = "nautilus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = FeaturestoreServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "scallop", - "location": "abalone", - } - path = FeaturestoreServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = FeaturestoreServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.FeaturestoreServiceTransport, '_prep_wrapped_messages') as prep: - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.FeaturestoreServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = FeaturestoreServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = FeaturestoreServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = FeaturestoreServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py deleted file mode 100644 index 304b6a2453..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py +++ /dev/null @@ -1,3138 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import IndexEndpointServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import IndexEndpointServiceClient -from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import pagers -from google.cloud.aiplatform_v1beta1.services.index_endpoint_service import transports -from google.cloud.aiplatform_v1beta1.types import index_endpoint -from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint -from google.cloud.aiplatform_v1beta1.types import index_endpoint_service -from google.cloud.aiplatform_v1beta1.types import machine_resources -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert IndexEndpointServiceClient._get_default_mtls_endpoint(None) is None - assert IndexEndpointServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert IndexEndpointServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert IndexEndpointServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert IndexEndpointServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert IndexEndpointServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - IndexEndpointServiceClient, - IndexEndpointServiceAsyncClient, -]) -def test_index_endpoint_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.IndexEndpointServiceGrpcTransport, "grpc"), - (transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_index_endpoint_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 
'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - IndexEndpointServiceClient, - IndexEndpointServiceAsyncClient, -]) -def test_index_endpoint_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_index_endpoint_service_client_get_transport_class(): - transport = IndexEndpointServiceClient.get_transport_class() - available_transports = [ - transports.IndexEndpointServiceGrpcTransport, - ] - assert transport in available_transports - - transport = IndexEndpointServiceClient.get_transport_class("grpc") - assert transport == transports.IndexEndpointServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc"), - (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(IndexEndpointServiceClient, 
"DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceClient)) -@mock.patch.object(IndexEndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceAsyncClient)) -def test_index_endpoint_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(IndexEndpointServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(IndexEndpointServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc", "true"), - (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc", "false"), - (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(IndexEndpointServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceClient)) -@mock.patch.object(IndexEndpointServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexEndpointServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_index_endpoint_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc"), - (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_index_endpoint_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (IndexEndpointServiceClient, transports.IndexEndpointServiceGrpcTransport, "grpc"), - (IndexEndpointServiceAsyncClient, transports.IndexEndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_index_endpoint_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_index_endpoint_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.index_endpoint_service.transports.IndexEndpointServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = IndexEndpointServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_create_index_endpoint(transport: str = 'grpc', request_type=index_endpoint_service.CreateIndexEndpointRequest): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.CreateIndexEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_index_endpoint_from_dict(): - test_create_index_endpoint(request_type=dict) - - -def test_create_index_endpoint_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index_endpoint), - '__call__') as call: - client.create_index_endpoint() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.CreateIndexEndpointRequest() - - -@pytest.mark.asyncio -async def test_create_index_endpoint_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.CreateIndexEndpointRequest): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.CreateIndexEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_index_endpoint_async_from_dict(): - await test_create_index_endpoint_async(request_type=dict) - - -def test_create_index_endpoint_field_headers(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.CreateIndexEndpointRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index_endpoint), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_index_endpoint_field_headers_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = index_endpoint_service.CreateIndexEndpointRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index_endpoint), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_index_endpoint_flattened(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_index_endpoint( - parent='parent_value', - index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].index_endpoint - mock_val = gca_index_endpoint.IndexEndpoint(name='name_value') - assert arg == mock_val - - -def test_create_index_endpoint_flattened_error(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_index_endpoint( - index_endpoint_service.CreateIndexEndpointRequest(), - parent='parent_value', - index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_index_endpoint_flattened_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_index_endpoint( - parent='parent_value', - index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].index_endpoint - mock_val = gca_index_endpoint.IndexEndpoint(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_index_endpoint_flattened_error_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_index_endpoint( - index_endpoint_service.CreateIndexEndpointRequest(), - parent='parent_value', - index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), - ) - - -def test_get_index_endpoint(transport: str = 'grpc', request_type=index_endpoint_service.GetIndexEndpointRequest): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = index_endpoint.IndexEndpoint( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - network='network_value', - enable_private_service_connect=True, - ) - response = client.get_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.GetIndexEndpointRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, index_endpoint.IndexEndpoint) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.network == 'network_value' - assert response.enable_private_service_connect is True - - -def test_get_index_endpoint_from_dict(): - test_get_index_endpoint(request_type=dict) - - -def test_get_index_endpoint_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index_endpoint), - '__call__') as call: - client.get_index_endpoint() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.GetIndexEndpointRequest() - - -@pytest.mark.asyncio -async def test_get_index_endpoint_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.GetIndexEndpointRequest): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint.IndexEndpoint( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - network='network_value', - enable_private_service_connect=True, - )) - response = await client.get_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.GetIndexEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, index_endpoint.IndexEndpoint) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.network == 'network_value' - assert response.enable_private_service_connect is True - - -@pytest.mark.asyncio -async def test_get_index_endpoint_async_from_dict(): - await test_get_index_endpoint_async(request_type=dict) - - -def test_get_index_endpoint_field_headers(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.GetIndexEndpointRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index_endpoint), - '__call__') as call: - call.return_value = index_endpoint.IndexEndpoint() - client.get_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_index_endpoint_field_headers_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.GetIndexEndpointRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index_endpoint), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint.IndexEndpoint()) - await client.get_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_index_endpoint_flattened(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = index_endpoint.IndexEndpoint() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_index_endpoint( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_index_endpoint_flattened_error(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_index_endpoint( - index_endpoint_service.GetIndexEndpointRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_index_endpoint_flattened_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = index_endpoint.IndexEndpoint() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint.IndexEndpoint()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_index_endpoint( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_index_endpoint_flattened_error_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_index_endpoint( - index_endpoint_service.GetIndexEndpointRequest(), - name='name_value', - ) - - -def test_list_index_endpoints(transport: str = 'grpc', request_type=index_endpoint_service.ListIndexEndpointsRequest): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_index_endpoints), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = index_endpoint_service.ListIndexEndpointsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_index_endpoints(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.ListIndexEndpointsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListIndexEndpointsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_index_endpoints_from_dict(): - test_list_index_endpoints(request_type=dict) - - -def test_list_index_endpoints_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_index_endpoints), - '__call__') as call: - client.list_index_endpoints() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.ListIndexEndpointsRequest() - - -@pytest.mark.asyncio -async def test_list_index_endpoints_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.ListIndexEndpointsRequest): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_index_endpoints), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint_service.ListIndexEndpointsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_index_endpoints(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.ListIndexEndpointsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListIndexEndpointsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_index_endpoints_async_from_dict(): - await test_list_index_endpoints_async(request_type=dict) - - -def test_list_index_endpoints_field_headers(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = index_endpoint_service.ListIndexEndpointsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_index_endpoints), - '__call__') as call: - call.return_value = index_endpoint_service.ListIndexEndpointsResponse() - client.list_index_endpoints(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_index_endpoints_field_headers_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.ListIndexEndpointsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_index_endpoints), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint_service.ListIndexEndpointsResponse()) - await client.list_index_endpoints(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_index_endpoints_flattened(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_index_endpoints), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = index_endpoint_service.ListIndexEndpointsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_index_endpoints( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_index_endpoints_flattened_error(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_index_endpoints( - index_endpoint_service.ListIndexEndpointsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_index_endpoints_flattened_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_index_endpoints), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = index_endpoint_service.ListIndexEndpointsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_endpoint_service.ListIndexEndpointsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_index_endpoints( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_index_endpoints_flattened_error_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_index_endpoints( - index_endpoint_service.ListIndexEndpointsRequest(), - parent='parent_value', - ) - - -def test_list_index_endpoints_pager(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_index_endpoints), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[ - index_endpoint.IndexEndpoint(), - index_endpoint.IndexEndpoint(), - index_endpoint.IndexEndpoint(), - ], - next_page_token='abc', - ), - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[], - next_page_token='def', - ), - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[ - index_endpoint.IndexEndpoint(), - ], - next_page_token='ghi', - ), - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[ - index_endpoint.IndexEndpoint(), - index_endpoint.IndexEndpoint(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_index_endpoints(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, index_endpoint.IndexEndpoint) - for i in results) - -def test_list_index_endpoints_pages(): - client = 
IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_index_endpoints), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[ - index_endpoint.IndexEndpoint(), - index_endpoint.IndexEndpoint(), - index_endpoint.IndexEndpoint(), - ], - next_page_token='abc', - ), - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[], - next_page_token='def', - ), - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[ - index_endpoint.IndexEndpoint(), - ], - next_page_token='ghi', - ), - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[ - index_endpoint.IndexEndpoint(), - index_endpoint.IndexEndpoint(), - ], - ), - RuntimeError, - ) - pages = list(client.list_index_endpoints(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_index_endpoints_async_pager(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_index_endpoints), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[ - index_endpoint.IndexEndpoint(), - index_endpoint.IndexEndpoint(), - index_endpoint.IndexEndpoint(), - ], - next_page_token='abc', - ), - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[], - next_page_token='def', - ), - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[ - index_endpoint.IndexEndpoint(), - ], - next_page_token='ghi', - ), - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[ - index_endpoint.IndexEndpoint(), - index_endpoint.IndexEndpoint(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_index_endpoints(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, index_endpoint.IndexEndpoint) - for i in responses) - -@pytest.mark.asyncio -async def test_list_index_endpoints_async_pages(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_index_endpoints), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[ - index_endpoint.IndexEndpoint(), - index_endpoint.IndexEndpoint(), - index_endpoint.IndexEndpoint(), - ], - next_page_token='abc', - ), - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[], - next_page_token='def', - ), - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[ - index_endpoint.IndexEndpoint(), - ], - next_page_token='ghi', - ), - index_endpoint_service.ListIndexEndpointsResponse( - index_endpoints=[ - index_endpoint.IndexEndpoint(), - index_endpoint.IndexEndpoint(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_index_endpoints(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_index_endpoint(transport: str = 'grpc', request_type=index_endpoint_service.UpdateIndexEndpointRequest): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_index_endpoint.IndexEndpoint( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - network='network_value', - enable_private_service_connect=True, - ) - response = client.update_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.UpdateIndexEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_index_endpoint.IndexEndpoint) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.network == 'network_value' - assert response.enable_private_service_connect is True - - -def test_update_index_endpoint_from_dict(): - test_update_index_endpoint(request_type=dict) - - -def test_update_index_endpoint_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index_endpoint), - '__call__') as call: - client.update_index_endpoint() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.UpdateIndexEndpointRequest() - - -@pytest.mark.asyncio -async def test_update_index_endpoint_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.UpdateIndexEndpointRequest): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_index_endpoint.IndexEndpoint( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - network='network_value', - enable_private_service_connect=True, - )) - response = await client.update_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.UpdateIndexEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_index_endpoint.IndexEndpoint) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.network == 'network_value' - assert response.enable_private_service_connect is True - - -@pytest.mark.asyncio -async def test_update_index_endpoint_async_from_dict(): - await test_update_index_endpoint_async(request_type=dict) - - -def test_update_index_endpoint_field_headers(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.UpdateIndexEndpointRequest() - - request.index_endpoint.name = 'index_endpoint.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index_endpoint), - '__call__') as call: - call.return_value = gca_index_endpoint.IndexEndpoint() - client.update_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'index_endpoint.name=index_endpoint.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_index_endpoint_field_headers_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.UpdateIndexEndpointRequest() - - request.index_endpoint.name = 'index_endpoint.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index_endpoint), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_index_endpoint.IndexEndpoint()) - await client.update_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'index_endpoint.name=index_endpoint.name/value', - ) in kw['metadata'] - - -def test_update_index_endpoint_flattened(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_index_endpoint.IndexEndpoint() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.update_index_endpoint( - index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].index_endpoint - mock_val = gca_index_endpoint.IndexEndpoint(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_index_endpoint_flattened_error(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_index_endpoint( - index_endpoint_service.UpdateIndexEndpointRequest(), - index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_index_endpoint_flattened_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_index_endpoint.IndexEndpoint() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_index_endpoint.IndexEndpoint()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.update_index_endpoint( - index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].index_endpoint - mock_val = gca_index_endpoint.IndexEndpoint(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_index_endpoint_flattened_error_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_index_endpoint( - index_endpoint_service.UpdateIndexEndpointRequest(), - index_endpoint=gca_index_endpoint.IndexEndpoint(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_index_endpoint(transport: str = 'grpc', request_type=index_endpoint_service.DeleteIndexEndpointRequest): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.DeleteIndexEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_index_endpoint_from_dict(): - test_delete_index_endpoint(request_type=dict) - - -def test_delete_index_endpoint_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index_endpoint), - '__call__') as call: - client.delete_index_endpoint() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.DeleteIndexEndpointRequest() - - -@pytest.mark.asyncio -async def test_delete_index_endpoint_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.DeleteIndexEndpointRequest): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.DeleteIndexEndpointRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_index_endpoint_async_from_dict(): - await test_delete_index_endpoint_async(request_type=dict) - - -def test_delete_index_endpoint_field_headers(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.DeleteIndexEndpointRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index_endpoint), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_index_endpoint_field_headers_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.DeleteIndexEndpointRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_index_endpoint), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_index_endpoint(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_index_endpoint_flattened(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_index_endpoint( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_index_endpoint_flattened_error(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.delete_index_endpoint( - index_endpoint_service.DeleteIndexEndpointRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_index_endpoint_flattened_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index_endpoint), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_index_endpoint( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_index_endpoint_flattened_error_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_index_endpoint( - index_endpoint_service.DeleteIndexEndpointRequest(), - name='name_value', - ) - - -def test_deploy_index(transport: str = 'grpc', request_type=index_endpoint_service.DeployIndexRequest): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.deploy_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.DeployIndexRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_deploy_index_from_dict(): - test_deploy_index(request_type=dict) - - -def test_deploy_index_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_index), - '__call__') as call: - client.deploy_index() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.DeployIndexRequest() - - -@pytest.mark.asyncio -async def test_deploy_index_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.DeployIndexRequest): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.deploy_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.deploy_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.DeployIndexRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_deploy_index_async_from_dict(): - await test_deploy_index_async(request_type=dict) - - -def test_deploy_index_field_headers(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.DeployIndexRequest() - - request.index_endpoint = 'index_endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_index), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.deploy_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'index_endpoint=index_endpoint/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_deploy_index_field_headers_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = index_endpoint_service.DeployIndexRequest() - - request.index_endpoint = 'index_endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_index), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.deploy_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'index_endpoint=index_endpoint/value', - ) in kw['metadata'] - - -def test_deploy_index_flattened(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.deploy_index( - index_endpoint='index_endpoint_value', - deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].index_endpoint - mock_val = 'index_endpoint_value' - assert arg == mock_val - arg = args[0].deployed_index - mock_val = gca_index_endpoint.DeployedIndex(id='id_value') - assert arg == mock_val - - -def test_deploy_index_flattened_error(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.deploy_index( - index_endpoint_service.DeployIndexRequest(), - index_endpoint='index_endpoint_value', - deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), - ) - - -@pytest.mark.asyncio -async def test_deploy_index_flattened_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.deploy_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.deploy_index( - index_endpoint='index_endpoint_value', - deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].index_endpoint - mock_val = 'index_endpoint_value' - assert arg == mock_val - arg = args[0].deployed_index - mock_val = gca_index_endpoint.DeployedIndex(id='id_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_deploy_index_flattened_error_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.deploy_index( - index_endpoint_service.DeployIndexRequest(), - index_endpoint='index_endpoint_value', - deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), - ) - - -def test_undeploy_index(transport: str = 'grpc', request_type=index_endpoint_service.UndeployIndexRequest): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.undeploy_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.UndeployIndexRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_undeploy_index_from_dict(): - test_undeploy_index(request_type=dict) - - -def test_undeploy_index_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_index), - '__call__') as call: - client.undeploy_index() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.UndeployIndexRequest() - - -@pytest.mark.asyncio -async def test_undeploy_index_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.UndeployIndexRequest): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.undeploy_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.UndeployIndexRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_undeploy_index_async_from_dict(): - await test_undeploy_index_async(request_type=dict) - - -def test_undeploy_index_field_headers(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.UndeployIndexRequest() - - request.index_endpoint = 'index_endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_index), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.undeploy_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'index_endpoint=index_endpoint/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_undeploy_index_field_headers_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.UndeployIndexRequest() - - request.index_endpoint = 'index_endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_index), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.undeploy_index(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'index_endpoint=index_endpoint/value', - ) in kw['metadata'] - - -def test_undeploy_index_flattened(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undeploy_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.undeploy_index( - index_endpoint='index_endpoint_value', - deployed_index_id='deployed_index_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].index_endpoint - mock_val = 'index_endpoint_value' - assert arg == mock_val - arg = args[0].deployed_index_id - mock_val = 'deployed_index_id_value' - assert arg == mock_val - - -def test_undeploy_index_flattened_error(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.undeploy_index( - index_endpoint_service.UndeployIndexRequest(), - index_endpoint='index_endpoint_value', - deployed_index_id='deployed_index_id_value', - ) - - -@pytest.mark.asyncio -async def test_undeploy_index_flattened_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.undeploy_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.undeploy_index( - index_endpoint='index_endpoint_value', - deployed_index_id='deployed_index_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].index_endpoint - mock_val = 'index_endpoint_value' - assert arg == mock_val - arg = args[0].deployed_index_id - mock_val = 'deployed_index_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_undeploy_index_flattened_error_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.undeploy_index( - index_endpoint_service.UndeployIndexRequest(), - index_endpoint='index_endpoint_value', - deployed_index_id='deployed_index_id_value', - ) - - -def test_mutate_deployed_index(transport: str = 'grpc', request_type=index_endpoint_service.MutateDeployedIndexRequest): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.mutate_deployed_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.mutate_deployed_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.MutateDeployedIndexRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_mutate_deployed_index_from_dict(): - test_mutate_deployed_index(request_type=dict) - - -def test_mutate_deployed_index_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.mutate_deployed_index), - '__call__') as call: - client.mutate_deployed_index() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.MutateDeployedIndexRequest() - - -@pytest.mark.asyncio -async def test_mutate_deployed_index_async(transport: str = 'grpc_asyncio', request_type=index_endpoint_service.MutateDeployedIndexRequest): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.mutate_deployed_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.mutate_deployed_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_endpoint_service.MutateDeployedIndexRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_mutate_deployed_index_async_from_dict(): - await test_mutate_deployed_index_async(request_type=dict) - - -def test_mutate_deployed_index_field_headers(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.MutateDeployedIndexRequest() - - request.index_endpoint = 'index_endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.mutate_deployed_index), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.mutate_deployed_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'index_endpoint=index_endpoint/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_mutate_deployed_index_field_headers_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_endpoint_service.MutateDeployedIndexRequest() - - request.index_endpoint = 'index_endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.mutate_deployed_index), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.mutate_deployed_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'index_endpoint=index_endpoint/value', - ) in kw['metadata'] - - -def test_mutate_deployed_index_flattened(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.mutate_deployed_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.mutate_deployed_index( - index_endpoint='index_endpoint_value', - deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].index_endpoint - mock_val = 'index_endpoint_value' - assert arg == mock_val - arg = args[0].deployed_index - mock_val = gca_index_endpoint.DeployedIndex(id='id_value') - assert arg == mock_val - - -def test_mutate_deployed_index_flattened_error(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.mutate_deployed_index( - index_endpoint_service.MutateDeployedIndexRequest(), - index_endpoint='index_endpoint_value', - deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), - ) - - -@pytest.mark.asyncio -async def test_mutate_deployed_index_flattened_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.mutate_deployed_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.mutate_deployed_index( - index_endpoint='index_endpoint_value', - deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].index_endpoint - mock_val = 'index_endpoint_value' - assert arg == mock_val - arg = args[0].deployed_index - mock_val = gca_index_endpoint.DeployedIndex(id='id_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_mutate_deployed_index_flattened_error_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.mutate_deployed_index( - index_endpoint_service.MutateDeployedIndexRequest(), - index_endpoint='index_endpoint_value', - deployed_index=gca_index_endpoint.DeployedIndex(id='id_value'), - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.IndexEndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.IndexEndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = IndexEndpointServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.IndexEndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = IndexEndpointServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. 
- transport = transports.IndexEndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = IndexEndpointServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.IndexEndpointServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.IndexEndpointServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.IndexEndpointServiceGrpcTransport, - transports.IndexEndpointServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.IndexEndpointServiceGrpcTransport, - ) - -def test_index_endpoint_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.IndexEndpointServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_index_endpoint_service_base_transport(): - # Instantiate the base transport. 
- with mock.patch('google.cloud.aiplatform_v1beta1.services.index_endpoint_service.transports.IndexEndpointServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.IndexEndpointServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'create_index_endpoint', - 'get_index_endpoint', - 'list_index_endpoints', - 'update_index_endpoint', - 'delete_index_endpoint', - 'deploy_index', - 'undeploy_index', - 'mutate_deployed_index', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -def test_index_endpoint_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.index_endpoint_service.transports.IndexEndpointServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.IndexEndpointServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_index_endpoint_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.index_endpoint_service.transports.IndexEndpointServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.IndexEndpointServiceTransport() - adc.assert_called_once() - - -def test_index_endpoint_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - IndexEndpointServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.IndexEndpointServiceGrpcTransport, - transports.IndexEndpointServiceGrpcAsyncIOTransport, - ], -) -def test_index_endpoint_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.IndexEndpointServiceGrpcTransport, grpc_helpers), - (transports.IndexEndpointServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_index_endpoint_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.IndexEndpointServiceGrpcTransport, transports.IndexEndpointServiceGrpcAsyncIOTransport]) -def test_index_endpoint_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. 
- with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_index_endpoint_service_host_no_port(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_index_endpoint_service_host_with_port(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_index_endpoint_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.IndexEndpointServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_index_endpoint_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.IndexEndpointServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.IndexEndpointServiceGrpcTransport, transports.IndexEndpointServiceGrpcAsyncIOTransport]) -def test_index_endpoint_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are 
-# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.IndexEndpointServiceGrpcTransport, transports.IndexEndpointServiceGrpcAsyncIOTransport]) -def test_index_endpoint_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_index_endpoint_service_grpc_lro_client(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. 
- assert transport.operations_client is transport.operations_client - - -def test_index_endpoint_service_grpc_lro_async_client(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_index_path(): - project = "squid" - location = "clam" - index = "whelk" - expected = "projects/{project}/locations/{location}/indexes/{index}".format(project=project, location=location, index=index, ) - actual = IndexEndpointServiceClient.index_path(project, location, index) - assert expected == actual - - -def test_parse_index_path(): - expected = { - "project": "octopus", - "location": "oyster", - "index": "nudibranch", - } - path = IndexEndpointServiceClient.index_path(**expected) - - # Check that the path construction is reversible. - actual = IndexEndpointServiceClient.parse_index_path(path) - assert expected == actual - -def test_index_endpoint_path(): - project = "cuttlefish" - location = "mussel" - index_endpoint = "winkle" - expected = "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format(project=project, location=location, index_endpoint=index_endpoint, ) - actual = IndexEndpointServiceClient.index_endpoint_path(project, location, index_endpoint) - assert expected == actual - - -def test_parse_index_endpoint_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "index_endpoint": "abalone", - } - path = IndexEndpointServiceClient.index_endpoint_path(**expected) - - # Check that the path construction is reversible. 
- actual = IndexEndpointServiceClient.parse_index_endpoint_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "squid" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = IndexEndpointServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "clam", - } - path = IndexEndpointServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = IndexEndpointServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "whelk" - expected = "folders/{folder}".format(folder=folder, ) - actual = IndexEndpointServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "octopus", - } - path = IndexEndpointServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = IndexEndpointServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "oyster" - expected = "organizations/{organization}".format(organization=organization, ) - actual = IndexEndpointServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nudibranch", - } - path = IndexEndpointServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. 
- actual = IndexEndpointServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "cuttlefish" - expected = "projects/{project}".format(project=project, ) - actual = IndexEndpointServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "mussel", - } - path = IndexEndpointServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = IndexEndpointServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "winkle" - location = "nautilus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = IndexEndpointServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "scallop", - "location": "abalone", - } - path = IndexEndpointServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = IndexEndpointServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.IndexEndpointServiceTransport, '_prep_wrapped_messages') as prep: - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.IndexEndpointServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = IndexEndpointServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = IndexEndpointServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = IndexEndpointServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py deleted file mode 100644 index cc423f2c3a..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_index_service.py +++ /dev/null @@ -1,2381 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.index_service import IndexServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.index_service import IndexServiceClient -from google.cloud.aiplatform_v1beta1.services.index_service import pagers -from google.cloud.aiplatform_v1beta1.services.index_service import transports -from google.cloud.aiplatform_v1beta1.types import deployed_index_ref -from google.cloud.aiplatform_v1beta1.types import index -from google.cloud.aiplatform_v1beta1.types import index as gca_index -from google.cloud.aiplatform_v1beta1.types import index_service -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert IndexServiceClient._get_default_mtls_endpoint(None) is None - assert IndexServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert IndexServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert IndexServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert IndexServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert IndexServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - IndexServiceClient, - IndexServiceAsyncClient, -]) -def test_index_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.IndexServiceGrpcTransport, "grpc"), - (transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_index_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, 
None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - IndexServiceClient, - IndexServiceAsyncClient, -]) -def test_index_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_index_service_client_get_transport_class(): - transport = IndexServiceClient.get_transport_class() - available_transports = [ - transports.IndexServiceGrpcTransport, - ] - assert transport in available_transports - - transport = IndexServiceClient.get_transport_class("grpc") - assert transport == transports.IndexServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc"), - (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(IndexServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceClient)) -@mock.patch.object(IndexServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceAsyncClient)) -def 
test_index_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(IndexServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(IndexServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc", "true"), - (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc", "false"), - (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, 
"grpc_asyncio", "false"), -]) -@mock.patch.object(IndexServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceClient)) -@mock.patch.object(IndexServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(IndexServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_index_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc"), - (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_index_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (IndexServiceClient, transports.IndexServiceGrpcTransport, "grpc"), - (IndexServiceAsyncClient, transports.IndexServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_index_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_index_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.index_service.transports.IndexServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = IndexServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_create_index(transport: str = 'grpc', request_type=index_service.CreateIndexRequest): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_index(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.CreateIndexRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_index_from_dict(): - test_create_index(request_type=dict) - - -def test_create_index_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index), - '__call__') as call: - client.create_index() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.CreateIndexRequest() - - -@pytest.mark.asyncio -async def test_create_index_async(transport: str = 'grpc_asyncio', request_type=index_service.CreateIndexRequest): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.CreateIndexRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_index_async_from_dict(): - await test_create_index_async(request_type=dict) - - -def test_create_index_field_headers(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_service.CreateIndexRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_index_field_headers_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_service.CreateIndexRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_index_flattened(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_index( - parent='parent_value', - index=gca_index.Index(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].index - mock_val = gca_index.Index(name='name_value') - assert arg == mock_val - - -def test_create_index_flattened_error(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_index( - index_service.CreateIndexRequest(), - parent='parent_value', - index=gca_index.Index(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_index_flattened_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_index), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_index( - parent='parent_value', - index=gca_index.Index(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].index - mock_val = gca_index.Index(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_index_flattened_error_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_index( - index_service.CreateIndexRequest(), - parent='parent_value', - index=gca_index.Index(name='name_value'), - ) - - -def test_get_index(transport: str = 'grpc', request_type=index_service.GetIndexRequest): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = index.Index( - name='name_value', - display_name='display_name_value', - description='description_value', - metadata_schema_uri='metadata_schema_uri_value', - etag='etag_value', - ) - response = client.get_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.GetIndexRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, index.Index) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.etag == 'etag_value' - - -def test_get_index_from_dict(): - test_get_index(request_type=dict) - - -def test_get_index_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index), - '__call__') as call: - client.get_index() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.GetIndexRequest() - - -@pytest.mark.asyncio -async def test_get_index_async(transport: str = 'grpc_asyncio', request_type=index_service.GetIndexRequest): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(index.Index( - name='name_value', - display_name='display_name_value', - description='description_value', - metadata_schema_uri='metadata_schema_uri_value', - etag='etag_value', - )) - response = await client.get_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.GetIndexRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, index.Index) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_index_async_from_dict(): - await test_get_index_async(request_type=dict) - - -def test_get_index_field_headers(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_service.GetIndexRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index), - '__call__') as call: - call.return_value = index.Index() - client.get_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_index_field_headers_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_service.GetIndexRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index.Index()) - await client.get_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_index_flattened(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = index.Index() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_index( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_index_flattened_error(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_index( - index_service.GetIndexRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_index_flattened_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = index.Index() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index.Index()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_index( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_index_flattened_error_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_index( - index_service.GetIndexRequest(), - name='name_value', - ) - - -def test_list_indexes(transport: str = 'grpc', request_type=index_service.ListIndexesRequest): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_indexes), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = index_service.ListIndexesResponse( - next_page_token='next_page_token_value', - ) - response = client.list_indexes(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.ListIndexesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListIndexesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_indexes_from_dict(): - test_list_indexes(request_type=dict) - - -def test_list_indexes_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_indexes), - '__call__') as call: - client.list_indexes() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.ListIndexesRequest() - - -@pytest.mark.asyncio -async def test_list_indexes_async(transport: str = 'grpc_asyncio', request_type=index_service.ListIndexesRequest): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_indexes), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(index_service.ListIndexesResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_indexes(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.ListIndexesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListIndexesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_indexes_async_from_dict(): - await test_list_indexes_async(request_type=dict) - - -def test_list_indexes_field_headers(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_service.ListIndexesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_indexes), - '__call__') as call: - call.return_value = index_service.ListIndexesResponse() - client.list_indexes(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_indexes_field_headers_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_service.ListIndexesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_indexes), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_service.ListIndexesResponse()) - await client.list_indexes(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_indexes_flattened(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_indexes), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = index_service.ListIndexesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_indexes( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_indexes_flattened_error(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_indexes( - index_service.ListIndexesRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_indexes_flattened_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_indexes), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = index_service.ListIndexesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(index_service.ListIndexesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_indexes( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_indexes_flattened_error_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_indexes( - index_service.ListIndexesRequest(), - parent='parent_value', - ) - - -def test_list_indexes_pager(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_indexes), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - index_service.ListIndexesResponse( - indexes=[ - index.Index(), - index.Index(), - index.Index(), - ], - next_page_token='abc', - ), - index_service.ListIndexesResponse( - indexes=[], - next_page_token='def', - ), - index_service.ListIndexesResponse( - indexes=[ - index.Index(), - ], - next_page_token='ghi', - ), - index_service.ListIndexesResponse( - indexes=[ - index.Index(), - index.Index(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_indexes(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, index.Index) - for i in results) - -def test_list_indexes_pages(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_indexes), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - index_service.ListIndexesResponse( - indexes=[ - index.Index(), - index.Index(), - index.Index(), - ], - next_page_token='abc', - ), - index_service.ListIndexesResponse( - indexes=[], - next_page_token='def', - ), - index_service.ListIndexesResponse( - indexes=[ - index.Index(), - ], - next_page_token='ghi', - ), - index_service.ListIndexesResponse( - indexes=[ - index.Index(), - index.Index(), - ], - ), - RuntimeError, - ) - pages = list(client.list_indexes(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_indexes_async_pager(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_indexes), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - index_service.ListIndexesResponse( - indexes=[ - index.Index(), - index.Index(), - index.Index(), - ], - next_page_token='abc', - ), - index_service.ListIndexesResponse( - indexes=[], - next_page_token='def', - ), - index_service.ListIndexesResponse( - indexes=[ - index.Index(), - ], - next_page_token='ghi', - ), - index_service.ListIndexesResponse( - indexes=[ - index.Index(), - index.Index(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_indexes(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, index.Index) - for i in responses) - -@pytest.mark.asyncio -async def test_list_indexes_async_pages(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_indexes), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - index_service.ListIndexesResponse( - indexes=[ - index.Index(), - index.Index(), - index.Index(), - ], - next_page_token='abc', - ), - index_service.ListIndexesResponse( - indexes=[], - next_page_token='def', - ), - index_service.ListIndexesResponse( - indexes=[ - index.Index(), - ], - next_page_token='ghi', - ), - index_service.ListIndexesResponse( - indexes=[ - index.Index(), - index.Index(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_indexes(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_index(transport: str = 'grpc', request_type=index_service.UpdateIndexRequest): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.update_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.UpdateIndexRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_update_index_from_dict(): - test_update_index(request_type=dict) - - -def test_update_index_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index), - '__call__') as call: - client.update_index() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.UpdateIndexRequest() - - -@pytest.mark.asyncio -async def test_update_index_async(transport: str = 'grpc_asyncio', request_type=index_service.UpdateIndexRequest): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.update_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.UpdateIndexRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_update_index_async_from_dict(): - await test_update_index_async(request_type=dict) - - -def test_update_index_field_headers(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_service.UpdateIndexRequest() - - request.index.name = 'index.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.update_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'index.name=index.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_index_field_headers_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_service.UpdateIndexRequest() - - request.index.name = 'index.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.update_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'index.name=index.name/value', - ) in kw['metadata'] - - -def test_update_index_flattened(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_index( - index=gca_index.Index(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].index - mock_val = gca_index.Index(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_index_flattened_error(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_index( - index_service.UpdateIndexRequest(), - index=gca_index.Index(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_index_flattened_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_index), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_index( - index=gca_index.Index(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].index - mock_val = gca_index.Index(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_index_flattened_error_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_index( - index_service.UpdateIndexRequest(), - index=gca_index.Index(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_index(transport: str = 'grpc', request_type=index_service.DeleteIndexRequest): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.DeleteIndexRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_index_from_dict(): - test_delete_index(request_type=dict) - - -def test_delete_index_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index), - '__call__') as call: - client.delete_index() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.DeleteIndexRequest() - - -@pytest.mark.asyncio -async def test_delete_index_async(transport: str = 'grpc_asyncio', request_type=index_service.DeleteIndexRequest): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_index(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == index_service.DeleteIndexRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_index_async_from_dict(): - await test_delete_index_async(request_type=dict) - - -def test_delete_index_field_headers(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_service.DeleteIndexRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_index_field_headers_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = index_service.DeleteIndexRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_index), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_index(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_index_flattened(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_index( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_index_flattened_error(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_index( - index_service.DeleteIndexRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_index_flattened_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_index), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_index( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_index_flattened_error_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_index( - index_service.DeleteIndexRequest(), - name='name_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.IndexServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.IndexServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = IndexServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. 
- transport = transports.IndexServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = IndexServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.IndexServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = IndexServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.IndexServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.IndexServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.IndexServiceGrpcTransport, - transports.IndexServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. 
- client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.IndexServiceGrpcTransport, - ) - -def test_index_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.IndexServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_index_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1beta1.services.index_service.transports.IndexServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.IndexServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'create_index', - 'get_index', - 'list_indexes', - 'update_index', - 'delete_index', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -def test_index_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.index_service.transports.IndexServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.IndexServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - 
load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_index_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.index_service.transports.IndexServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.IndexServiceTransport() - adc.assert_called_once() - - -def test_index_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - IndexServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.IndexServiceGrpcTransport, - transports.IndexServiceGrpcAsyncIOTransport, - ], -) -def test_index_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.IndexServiceGrpcTransport, grpc_helpers), - (transports.IndexServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_index_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.IndexServiceGrpcTransport, transports.IndexServiceGrpcAsyncIOTransport]) -def test_index_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
- with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_index_service_host_no_port(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_index_service_host_with_port(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_index_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.IndexServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_index_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.IndexServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.IndexServiceGrpcTransport, transports.IndexServiceGrpcAsyncIOTransport]) -def test_index_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, 
- credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.IndexServiceGrpcTransport, transports.IndexServiceGrpcAsyncIOTransport]) -def test_index_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_index_service_grpc_lro_client(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_index_service_grpc_lro_async_client(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_index_path(): - project = "squid" - location = "clam" - index = "whelk" - expected = "projects/{project}/locations/{location}/indexes/{index}".format(project=project, location=location, index=index, ) - actual = IndexServiceClient.index_path(project, location, index) - assert expected == actual - - -def test_parse_index_path(): - expected = { - "project": "octopus", - "location": "oyster", - "index": "nudibranch", - } - path = IndexServiceClient.index_path(**expected) - - # Check that the path construction is reversible. 
- actual = IndexServiceClient.parse_index_path(path) - assert expected == actual - -def test_index_endpoint_path(): - project = "cuttlefish" - location = "mussel" - index_endpoint = "winkle" - expected = "projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}".format(project=project, location=location, index_endpoint=index_endpoint, ) - actual = IndexServiceClient.index_endpoint_path(project, location, index_endpoint) - assert expected == actual - - -def test_parse_index_endpoint_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "index_endpoint": "abalone", - } - path = IndexServiceClient.index_endpoint_path(**expected) - - # Check that the path construction is reversible. - actual = IndexServiceClient.parse_index_endpoint_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "squid" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = IndexServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "clam", - } - path = IndexServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = IndexServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "whelk" - expected = "folders/{folder}".format(folder=folder, ) - actual = IndexServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "octopus", - } - path = IndexServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. 
- actual = IndexServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "oyster" - expected = "organizations/{organization}".format(organization=organization, ) - actual = IndexServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nudibranch", - } - path = IndexServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = IndexServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "cuttlefish" - expected = "projects/{project}".format(project=project, ) - actual = IndexServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "mussel", - } - path = IndexServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = IndexServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "winkle" - location = "nautilus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = IndexServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "scallop", - "location": "abalone", - } - path = IndexServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = IndexServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.IndexServiceTransport, '_prep_wrapped_messages') as prep: - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.IndexServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = IndexServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = IndexServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = IndexServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py deleted file mode 100644 index 588f781edd..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py +++ /dev/null @@ -1,9153 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.job_service import JobServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.job_service import JobServiceClient -from google.cloud.aiplatform_v1beta1.services.job_service import pagers -from google.cloud.aiplatform_v1beta1.services.job_service import transports -from google.cloud.aiplatform_v1beta1.types import accelerator_type -from google.cloud.aiplatform_v1beta1.types import batch_prediction_job -from google.cloud.aiplatform_v1beta1.types import batch_prediction_job as gca_batch_prediction_job -from google.cloud.aiplatform_v1beta1.types import completion_stats -from google.cloud.aiplatform_v1beta1.types import custom_job -from google.cloud.aiplatform_v1beta1.types import custom_job as gca_custom_job -from google.cloud.aiplatform_v1beta1.types import data_labeling_job -from google.cloud.aiplatform_v1beta1.types import data_labeling_job as gca_data_labeling_job -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import env_var -from google.cloud.aiplatform_v1beta1.types import explanation -from google.cloud.aiplatform_v1beta1.types import explanation_metadata -from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job -from 
google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job as gca_hyperparameter_tuning_job -from google.cloud.aiplatform_v1beta1.types import io -from google.cloud.aiplatform_v1beta1.types import job_service -from google.cloud.aiplatform_v1beta1.types import job_state -from google.cloud.aiplatform_v1beta1.types import machine_resources -from google.cloud.aiplatform_v1beta1.types import manual_batch_tuning_parameters -from google.cloud.aiplatform_v1beta1.types import model -from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job -from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job as gca_model_deployment_monitoring_job -from google.cloud.aiplatform_v1beta1.types import model_monitoring -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.cloud.aiplatform_v1beta1.types import study -from google.cloud.aiplatform_v1beta1.types import unmanaged_container_model -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import any_pb2 # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from google.type import money_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert JobServiceClient._get_default_mtls_endpoint(None) is None - assert JobServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert JobServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert JobServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert JobServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert JobServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - JobServiceClient, - JobServiceAsyncClient, -]) -def test_job_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.JobServiceGrpcTransport, "grpc"), - (transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_job_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = 
transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - JobServiceClient, - JobServiceAsyncClient, -]) -def test_job_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_job_service_client_get_transport_class(): - transport = JobServiceClient.get_transport_class() - available_transports = [ - transports.JobServiceGrpcTransport, - ] - assert transport in available_transports - - transport = JobServiceClient.get_transport_class("grpc") - assert transport == transports.JobServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (JobServiceClient, transports.JobServiceGrpcTransport, "grpc"), - (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient)) -@mock.patch.object(JobServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceAsyncClient)) -def test_job_service_client_client_options(client_class, transport_class, transport_name): - # 
Check that if channel is provided we won't create a new one. - with mock.patch.object(JobServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(JobServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", "true"), - (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (JobServiceClient, transports.JobServiceGrpcTransport, "grpc", "false"), - (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), 
-]) -@mock.patch.object(JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient)) -@mock.patch.object(JobServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_job_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (JobServiceClient, transports.JobServiceGrpcTransport, "grpc"), - (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_job_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (JobServiceClient, transports.JobServiceGrpcTransport, "grpc"), - (JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_job_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_job_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.job_service.transports.JobServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = JobServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_create_custom_job(transport: str = 'grpc', request_type=job_service.CreateCustomJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = gca_custom_job.CustomJob( - name='name_value', - display_name='display_name_value', - state=job_state.JobState.JOB_STATE_QUEUED, - ) - response = client.create_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateCustomJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_custom_job.CustomJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -def test_create_custom_job_from_dict(): - test_create_custom_job(request_type=dict) - - -def test_create_custom_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_custom_job), - '__call__') as call: - client.create_custom_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateCustomJobRequest() - - -@pytest.mark.asyncio -async def test_create_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateCustomJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_custom_job.CustomJob( - name='name_value', - display_name='display_name_value', - state=job_state.JobState.JOB_STATE_QUEUED, - )) - response = await client.create_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateCustomJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_custom_job.CustomJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -@pytest.mark.asyncio -async def test_create_custom_job_async_from_dict(): - await test_create_custom_job_async(request_type=dict) - - -def test_create_custom_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CreateCustomJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_custom_job), - '__call__') as call: - call.return_value = gca_custom_job.CustomJob() - client.create_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_custom_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CreateCustomJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_custom_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_custom_job.CustomJob()) - await client.create_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_custom_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_custom_job.CustomJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_custom_job( - parent='parent_value', - custom_job=gca_custom_job.CustomJob(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].custom_job - mock_val = gca_custom_job.CustomJob(name='name_value') - assert arg == mock_val - - -def test_create_custom_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_custom_job( - job_service.CreateCustomJobRequest(), - parent='parent_value', - custom_job=gca_custom_job.CustomJob(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_custom_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_custom_job.CustomJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_custom_job.CustomJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_custom_job( - parent='parent_value', - custom_job=gca_custom_job.CustomJob(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].custom_job - mock_val = gca_custom_job.CustomJob(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_custom_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_custom_job( - job_service.CreateCustomJobRequest(), - parent='parent_value', - custom_job=gca_custom_job.CustomJob(name='name_value'), - ) - - -def test_get_custom_job(transport: str = 'grpc', request_type=job_service.GetCustomJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = custom_job.CustomJob( - name='name_value', - display_name='display_name_value', - state=job_state.JobState.JOB_STATE_QUEUED, - ) - response = client.get_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetCustomJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, custom_job.CustomJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -def test_get_custom_job_from_dict(): - test_get_custom_job(request_type=dict) - - -def test_get_custom_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_custom_job), - '__call__') as call: - client.get_custom_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetCustomJobRequest() - - -@pytest.mark.asyncio -async def test_get_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetCustomJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(custom_job.CustomJob( - name='name_value', - display_name='display_name_value', - state=job_state.JobState.JOB_STATE_QUEUED, - )) - response = await client.get_custom_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetCustomJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, custom_job.CustomJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -@pytest.mark.asyncio -async def test_get_custom_job_async_from_dict(): - await test_get_custom_job_async(request_type=dict) - - -def test_get_custom_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetCustomJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_custom_job), - '__call__') as call: - call.return_value = custom_job.CustomJob() - client.get_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_custom_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetCustomJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_custom_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(custom_job.CustomJob()) - await client.get_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_custom_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = custom_job.CustomJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_custom_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_custom_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_custom_job( - job_service.GetCustomJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_custom_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = custom_job.CustomJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(custom_job.CustomJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_custom_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_custom_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_custom_job( - job_service.GetCustomJobRequest(), - name='name_value', - ) - - -def test_list_custom_jobs(transport: str = 'grpc', request_type=job_service.ListCustomJobsRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListCustomJobsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_custom_jobs(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListCustomJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListCustomJobsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_custom_jobs_from_dict(): - test_list_custom_jobs(request_type=dict) - - -def test_list_custom_jobs_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__') as call: - client.list_custom_jobs() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListCustomJobsRequest() - - -@pytest.mark.asyncio -async def test_list_custom_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListCustomJobsRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListCustomJobsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_custom_jobs(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListCustomJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListCustomJobsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_custom_jobs_async_from_dict(): - await test_list_custom_jobs_async(request_type=dict) - - -def test_list_custom_jobs_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.ListCustomJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__') as call: - call.return_value = job_service.ListCustomJobsResponse() - client.list_custom_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_custom_jobs_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.ListCustomJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListCustomJobsResponse()) - await client.list_custom_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_custom_jobs_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListCustomJobsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_custom_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_custom_jobs_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_custom_jobs( - job_service.ListCustomJobsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_custom_jobs_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListCustomJobsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListCustomJobsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_custom_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_custom_jobs_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_custom_jobs( - job_service.ListCustomJobsRequest(), - parent='parent_value', - ) - - -def test_list_custom_jobs_pager(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - custom_job.CustomJob(), - custom_job.CustomJob(), - ], - next_page_token='abc', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[], - next_page_token='def', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - ], - next_page_token='ghi', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - custom_job.CustomJob(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_custom_jobs(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, custom_job.CustomJob) - for i in results) - -def test_list_custom_jobs_pages(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - custom_job.CustomJob(), - custom_job.CustomJob(), - ], - next_page_token='abc', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[], - next_page_token='def', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - ], - next_page_token='ghi', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - custom_job.CustomJob(), - ], - ), - RuntimeError, - ) - pages = list(client.list_custom_jobs(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_custom_jobs_async_pager(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - custom_job.CustomJob(), - custom_job.CustomJob(), - ], - next_page_token='abc', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[], - next_page_token='def', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - ], - next_page_token='ghi', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - custom_job.CustomJob(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_custom_jobs(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, custom_job.CustomJob) - for i in responses) - -@pytest.mark.asyncio -async def test_list_custom_jobs_async_pages(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_custom_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - custom_job.CustomJob(), - custom_job.CustomJob(), - ], - next_page_token='abc', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[], - next_page_token='def', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - ], - next_page_token='ghi', - ), - job_service.ListCustomJobsResponse( - custom_jobs=[ - custom_job.CustomJob(), - custom_job.CustomJob(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_custom_jobs(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_custom_job(transport: str = 'grpc', request_type=job_service.DeleteCustomJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteCustomJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_custom_job_from_dict(): - test_delete_custom_job(request_type=dict) - - -def test_delete_custom_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_custom_job), - '__call__') as call: - client.delete_custom_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteCustomJobRequest() - - -@pytest.mark.asyncio -async def test_delete_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteCustomJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteCustomJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_custom_job_async_from_dict(): - await test_delete_custom_job_async(request_type=dict) - - -def test_delete_custom_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = job_service.DeleteCustomJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_custom_job), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_custom_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteCustomJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_custom_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_custom_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_custom_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_custom_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_custom_job( - job_service.DeleteCustomJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_custom_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_custom_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_custom_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_custom_job( - job_service.DeleteCustomJobRequest(), - name='name_value', - ) - - -def test_cancel_custom_job(transport: str = 'grpc', request_type=job_service.CancelCustomJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.cancel_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelCustomJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_cancel_custom_job_from_dict(): - test_cancel_custom_job(request_type=dict) - - -def test_cancel_custom_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.cancel_custom_job), - '__call__') as call: - client.cancel_custom_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelCustomJobRequest() - - -@pytest.mark.asyncio -async def test_cancel_custom_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelCustomJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.cancel_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelCustomJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_cancel_custom_job_async_from_dict(): - await test_cancel_custom_job_async(request_type=dict) - - -def test_cancel_custom_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CancelCustomJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.cancel_custom_job), - '__call__') as call: - call.return_value = None - client.cancel_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_cancel_custom_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CancelCustomJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_custom_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.cancel_custom_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_cancel_custom_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.cancel_custom_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_cancel_custom_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.cancel_custom_job( - job_service.CancelCustomJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_cancel_custom_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_custom_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.cancel_custom_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_cancel_custom_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.cancel_custom_job( - job_service.CancelCustomJobRequest(), - name='name_value', - ) - - -def test_create_data_labeling_job(transport: str = 'grpc', request_type=job_service.CreateDataLabelingJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_data_labeling_job.DataLabelingJob( - name='name_value', - display_name='display_name_value', - datasets=['datasets_value'], - labeler_count=1375, - instruction_uri='instruction_uri_value', - inputs_schema_uri='inputs_schema_uri_value', - state=job_state.JobState.JOB_STATE_QUEUED, - labeling_progress=1810, - specialist_pools=['specialist_pools_value'], - ) - response = client.create_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateDataLabelingJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_data_labeling_job.DataLabelingJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.datasets == ['datasets_value'] - assert response.labeler_count == 1375 - assert response.instruction_uri == 'instruction_uri_value' - assert response.inputs_schema_uri == 'inputs_schema_uri_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - assert response.labeling_progress == 1810 - assert response.specialist_pools == ['specialist_pools_value'] - - -def test_create_data_labeling_job_from_dict(): - test_create_data_labeling_job(request_type=dict) - - -def test_create_data_labeling_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_data_labeling_job), - '__call__') as call: - client.create_data_labeling_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateDataLabelingJobRequest() - - -@pytest.mark.asyncio -async def test_create_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateDataLabelingJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_data_labeling_job.DataLabelingJob( - name='name_value', - display_name='display_name_value', - datasets=['datasets_value'], - labeler_count=1375, - instruction_uri='instruction_uri_value', - inputs_schema_uri='inputs_schema_uri_value', - state=job_state.JobState.JOB_STATE_QUEUED, - labeling_progress=1810, - specialist_pools=['specialist_pools_value'], - )) - response = await client.create_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateDataLabelingJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_data_labeling_job.DataLabelingJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.datasets == ['datasets_value'] - assert response.labeler_count == 1375 - assert response.instruction_uri == 'instruction_uri_value' - assert response.inputs_schema_uri == 'inputs_schema_uri_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - assert response.labeling_progress == 1810 - assert response.specialist_pools == ['specialist_pools_value'] - - -@pytest.mark.asyncio -async def test_create_data_labeling_job_async_from_dict(): - await test_create_data_labeling_job_async(request_type=dict) - - -def test_create_data_labeling_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CreateDataLabelingJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_data_labeling_job), - '__call__') as call: - call.return_value = gca_data_labeling_job.DataLabelingJob() - client.create_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_data_labeling_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CreateDataLabelingJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_data_labeling_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_data_labeling_job.DataLabelingJob()) - await client.create_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_data_labeling_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = gca_data_labeling_job.DataLabelingJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_data_labeling_job( - parent='parent_value', - data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].data_labeling_job - mock_val = gca_data_labeling_job.DataLabelingJob(name='name_value') - assert arg == mock_val - - -def test_create_data_labeling_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_data_labeling_job( - job_service.CreateDataLabelingJobRequest(), - parent='parent_value', - data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_data_labeling_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_data_labeling_job.DataLabelingJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_data_labeling_job.DataLabelingJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.create_data_labeling_job( - parent='parent_value', - data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].data_labeling_job - mock_val = gca_data_labeling_job.DataLabelingJob(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_data_labeling_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_data_labeling_job( - job_service.CreateDataLabelingJobRequest(), - parent='parent_value', - data_labeling_job=gca_data_labeling_job.DataLabelingJob(name='name_value'), - ) - - -def test_get_data_labeling_job(transport: str = 'grpc', request_type=job_service.GetDataLabelingJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = data_labeling_job.DataLabelingJob( - name='name_value', - display_name='display_name_value', - datasets=['datasets_value'], - labeler_count=1375, - instruction_uri='instruction_uri_value', - inputs_schema_uri='inputs_schema_uri_value', - state=job_state.JobState.JOB_STATE_QUEUED, - labeling_progress=1810, - specialist_pools=['specialist_pools_value'], - ) - response = client.get_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetDataLabelingJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, data_labeling_job.DataLabelingJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.datasets == ['datasets_value'] - assert response.labeler_count == 1375 - assert response.instruction_uri == 'instruction_uri_value' - assert response.inputs_schema_uri == 'inputs_schema_uri_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - assert response.labeling_progress == 1810 - assert response.specialist_pools == ['specialist_pools_value'] - - -def test_get_data_labeling_job_from_dict(): - test_get_data_labeling_job(request_type=dict) - - -def test_get_data_labeling_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_data_labeling_job), - '__call__') as call: - client.get_data_labeling_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetDataLabelingJobRequest() - - -@pytest.mark.asyncio -async def test_get_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetDataLabelingJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(data_labeling_job.DataLabelingJob( - name='name_value', - display_name='display_name_value', - datasets=['datasets_value'], - labeler_count=1375, - instruction_uri='instruction_uri_value', - inputs_schema_uri='inputs_schema_uri_value', - state=job_state.JobState.JOB_STATE_QUEUED, - labeling_progress=1810, - specialist_pools=['specialist_pools_value'], - )) - response = await client.get_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetDataLabelingJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, data_labeling_job.DataLabelingJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.datasets == ['datasets_value'] - assert response.labeler_count == 1375 - assert response.instruction_uri == 'instruction_uri_value' - assert response.inputs_schema_uri == 'inputs_schema_uri_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - assert response.labeling_progress == 1810 - assert response.specialist_pools == ['specialist_pools_value'] - - -@pytest.mark.asyncio -async def test_get_data_labeling_job_async_from_dict(): - await test_get_data_labeling_job_async(request_type=dict) - - -def test_get_data_labeling_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetDataLabelingJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_data_labeling_job), - '__call__') as call: - call.return_value = data_labeling_job.DataLabelingJob() - client.get_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_data_labeling_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = job_service.GetDataLabelingJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_data_labeling_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(data_labeling_job.DataLabelingJob()) - await client.get_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_data_labeling_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = data_labeling_job.DataLabelingJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_data_labeling_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_data_labeling_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_data_labeling_job( - job_service.GetDataLabelingJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_data_labeling_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = data_labeling_job.DataLabelingJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(data_labeling_job.DataLabelingJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_data_labeling_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_data_labeling_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_data_labeling_job( - job_service.GetDataLabelingJobRequest(), - name='name_value', - ) - - -def test_list_data_labeling_jobs(transport: str = 'grpc', request_type=job_service.ListDataLabelingJobsRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_data_labeling_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListDataLabelingJobsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_data_labeling_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListDataLabelingJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListDataLabelingJobsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_data_labeling_jobs_from_dict(): - test_list_data_labeling_jobs(request_type=dict) - - -def test_list_data_labeling_jobs_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_labeling_jobs), - '__call__') as call: - client.list_data_labeling_jobs() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListDataLabelingJobsRequest() - - -@pytest.mark.asyncio -async def test_list_data_labeling_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListDataLabelingJobsRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_data_labeling_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListDataLabelingJobsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_data_labeling_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListDataLabelingJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListDataLabelingJobsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_data_labeling_jobs_async_from_dict(): - await test_list_data_labeling_jobs_async(request_type=dict) - - -def test_list_data_labeling_jobs_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.ListDataLabelingJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_labeling_jobs), - '__call__') as call: - call.return_value = job_service.ListDataLabelingJobsResponse() - client.list_data_labeling_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_data_labeling_jobs_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.ListDataLabelingJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_labeling_jobs), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListDataLabelingJobsResponse()) - await client.list_data_labeling_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_data_labeling_jobs_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_labeling_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListDataLabelingJobsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_data_labeling_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_data_labeling_jobs_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_data_labeling_jobs( - job_service.ListDataLabelingJobsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_data_labeling_jobs_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_labeling_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListDataLabelingJobsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListDataLabelingJobsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_data_labeling_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_data_labeling_jobs_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.list_data_labeling_jobs( - job_service.ListDataLabelingJobsRequest(), - parent='parent_value', - ) - - -def test_list_data_labeling_jobs_pager(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_labeling_jobs), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[ - data_labeling_job.DataLabelingJob(), - data_labeling_job.DataLabelingJob(), - data_labeling_job.DataLabelingJob(), - ], - next_page_token='abc', - ), - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[], - next_page_token='def', - ), - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[ - data_labeling_job.DataLabelingJob(), - ], - next_page_token='ghi', - ), - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[ - data_labeling_job.DataLabelingJob(), - data_labeling_job.DataLabelingJob(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_data_labeling_jobs(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, data_labeling_job.DataLabelingJob) - for i in results) - -def test_list_data_labeling_jobs_pages(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_labeling_jobs), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[ - data_labeling_job.DataLabelingJob(), - data_labeling_job.DataLabelingJob(), - data_labeling_job.DataLabelingJob(), - ], - next_page_token='abc', - ), - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[], - next_page_token='def', - ), - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[ - data_labeling_job.DataLabelingJob(), - ], - next_page_token='ghi', - ), - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[ - data_labeling_job.DataLabelingJob(), - data_labeling_job.DataLabelingJob(), - ], - ), - RuntimeError, - ) - pages = list(client.list_data_labeling_jobs(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_data_labeling_jobs_async_pager(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_labeling_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[ - data_labeling_job.DataLabelingJob(), - data_labeling_job.DataLabelingJob(), - data_labeling_job.DataLabelingJob(), - ], - next_page_token='abc', - ), - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[], - next_page_token='def', - ), - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[ - data_labeling_job.DataLabelingJob(), - ], - next_page_token='ghi', - ), - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[ - data_labeling_job.DataLabelingJob(), - data_labeling_job.DataLabelingJob(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_data_labeling_jobs(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, data_labeling_job.DataLabelingJob) - for i in responses) - -@pytest.mark.asyncio -async def test_list_data_labeling_jobs_async_pages(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_data_labeling_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[ - data_labeling_job.DataLabelingJob(), - data_labeling_job.DataLabelingJob(), - data_labeling_job.DataLabelingJob(), - ], - next_page_token='abc', - ), - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[], - next_page_token='def', - ), - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[ - data_labeling_job.DataLabelingJob(), - ], - next_page_token='ghi', - ), - job_service.ListDataLabelingJobsResponse( - data_labeling_jobs=[ - data_labeling_job.DataLabelingJob(), - data_labeling_job.DataLabelingJob(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_data_labeling_jobs(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_data_labeling_job(transport: str = 'grpc', request_type=job_service.DeleteDataLabelingJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteDataLabelingJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_delete_data_labeling_job_from_dict(): - test_delete_data_labeling_job(request_type=dict) - - -def test_delete_data_labeling_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_data_labeling_job), - '__call__') as call: - client.delete_data_labeling_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteDataLabelingJobRequest() - - -@pytest.mark.asyncio -async def test_delete_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteDataLabelingJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteDataLabelingJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_data_labeling_job_async_from_dict(): - await test_delete_data_labeling_job_async(request_type=dict) - - -def test_delete_data_labeling_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteDataLabelingJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_data_labeling_job), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_data_labeling_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteDataLabelingJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_data_labeling_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_data_labeling_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_data_labeling_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_data_labeling_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_data_labeling_job( - job_service.DeleteDataLabelingJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_data_labeling_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_data_labeling_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_data_labeling_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_data_labeling_job( - job_service.DeleteDataLabelingJobRequest(), - name='name_value', - ) - - -def test_cancel_data_labeling_job(transport: str = 'grpc', request_type=job_service.CancelDataLabelingJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.cancel_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelDataLabelingJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_cancel_data_labeling_job_from_dict(): - test_cancel_data_labeling_job(request_type=dict) - - -def test_cancel_data_labeling_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_data_labeling_job), - '__call__') as call: - client.cancel_data_labeling_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelDataLabelingJobRequest() - - -@pytest.mark.asyncio -async def test_cancel_data_labeling_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelDataLabelingJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.cancel_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelDataLabelingJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_cancel_data_labeling_job_async_from_dict(): - await test_cancel_data_labeling_job_async(request_type=dict) - - -def test_cancel_data_labeling_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CancelDataLabelingJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_data_labeling_job), - '__call__') as call: - call.return_value = None - client.cancel_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_cancel_data_labeling_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CancelDataLabelingJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.cancel_data_labeling_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.cancel_data_labeling_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_cancel_data_labeling_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.cancel_data_labeling_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_cancel_data_labeling_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.cancel_data_labeling_job( - job_service.CancelDataLabelingJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_cancel_data_labeling_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.cancel_data_labeling_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.cancel_data_labeling_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_cancel_data_labeling_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.cancel_data_labeling_job( - job_service.CancelDataLabelingJobRequest(), - name='name_value', - ) - - -def test_create_hyperparameter_tuning_job(transport: str = 'grpc', request_type=job_service.CreateHyperparameterTuningJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob( - name='name_value', - display_name='display_name_value', - max_trial_count=1609, - parallel_trial_count=2128, - max_failed_trial_count=2317, - state=job_state.JobState.JOB_STATE_QUEUED, - ) - response = client.create_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateHyperparameterTuningJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_hyperparameter_tuning_job.HyperparameterTuningJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.max_trial_count == 1609 - assert response.parallel_trial_count == 2128 - assert response.max_failed_trial_count == 2317 - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -def test_create_hyperparameter_tuning_job_from_dict(): - test_create_hyperparameter_tuning_job(request_type=dict) - - -def test_create_hyperparameter_tuning_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_hyperparameter_tuning_job), - '__call__') as call: - client.create_hyperparameter_tuning_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateHyperparameterTuningJobRequest() - - -@pytest.mark.asyncio -async def test_create_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateHyperparameterTuningJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_hyperparameter_tuning_job.HyperparameterTuningJob( - name='name_value', - display_name='display_name_value', - max_trial_count=1609, - parallel_trial_count=2128, - max_failed_trial_count=2317, - state=job_state.JobState.JOB_STATE_QUEUED, - )) - response = await client.create_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateHyperparameterTuningJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_hyperparameter_tuning_job.HyperparameterTuningJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.max_trial_count == 1609 - assert response.parallel_trial_count == 2128 - assert response.max_failed_trial_count == 2317 - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -@pytest.mark.asyncio -async def test_create_hyperparameter_tuning_job_async_from_dict(): - await test_create_hyperparameter_tuning_job_async(request_type=dict) - - -def test_create_hyperparameter_tuning_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CreateHyperparameterTuningJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_hyperparameter_tuning_job), - '__call__') as call: - call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob() - client.create_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_hyperparameter_tuning_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = job_service.CreateHyperparameterTuningJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_hyperparameter_tuning_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_hyperparameter_tuning_job.HyperparameterTuningJob()) - await client.create_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_hyperparameter_tuning_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_hyperparameter_tuning_job( - parent='parent_value', - hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].hyperparameter_tuning_job - mock_val = gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value') - assert arg == mock_val - - -def test_create_hyperparameter_tuning_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_hyperparameter_tuning_job( - job_service.CreateHyperparameterTuningJobRequest(), - parent='parent_value', - hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_hyperparameter_tuning_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_hyperparameter_tuning_job.HyperparameterTuningJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_hyperparameter_tuning_job( - parent='parent_value', - hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].hyperparameter_tuning_job - mock_val = gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_hyperparameter_tuning_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_hyperparameter_tuning_job( - job_service.CreateHyperparameterTuningJobRequest(), - parent='parent_value', - hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(name='name_value'), - ) - - -def test_get_hyperparameter_tuning_job(transport: str = 'grpc', request_type=job_service.GetHyperparameterTuningJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob( - name='name_value', - display_name='display_name_value', - max_trial_count=1609, - parallel_trial_count=2128, - max_failed_trial_count=2317, - state=job_state.JobState.JOB_STATE_QUEUED, - ) - response = client.get_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetHyperparameterTuningJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, hyperparameter_tuning_job.HyperparameterTuningJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.max_trial_count == 1609 - assert response.parallel_trial_count == 2128 - assert response.max_failed_trial_count == 2317 - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -def test_get_hyperparameter_tuning_job_from_dict(): - test_get_hyperparameter_tuning_job(request_type=dict) - - -def test_get_hyperparameter_tuning_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_hyperparameter_tuning_job), - '__call__') as call: - client.get_hyperparameter_tuning_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetHyperparameterTuningJobRequest() - - -@pytest.mark.asyncio -async def test_get_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetHyperparameterTuningJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(hyperparameter_tuning_job.HyperparameterTuningJob( - name='name_value', - display_name='display_name_value', - max_trial_count=1609, - parallel_trial_count=2128, - max_failed_trial_count=2317, - state=job_state.JobState.JOB_STATE_QUEUED, - )) - response = await client.get_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetHyperparameterTuningJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, hyperparameter_tuning_job.HyperparameterTuningJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.max_trial_count == 1609 - assert response.parallel_trial_count == 2128 - assert response.max_failed_trial_count == 2317 - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -@pytest.mark.asyncio -async def test_get_hyperparameter_tuning_job_async_from_dict(): - await test_get_hyperparameter_tuning_job_async(request_type=dict) - - -def test_get_hyperparameter_tuning_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetHyperparameterTuningJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_hyperparameter_tuning_job), - '__call__') as call: - call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob() - client.get_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_hyperparameter_tuning_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetHyperparameterTuningJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_hyperparameter_tuning_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(hyperparameter_tuning_job.HyperparameterTuningJob()) - await client.get_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_hyperparameter_tuning_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_hyperparameter_tuning_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_hyperparameter_tuning_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_hyperparameter_tuning_job( - job_service.GetHyperparameterTuningJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_hyperparameter_tuning_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(hyperparameter_tuning_job.HyperparameterTuningJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_hyperparameter_tuning_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_hyperparameter_tuning_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_hyperparameter_tuning_job( - job_service.GetHyperparameterTuningJobRequest(), - name='name_value', - ) - - -def test_list_hyperparameter_tuning_jobs(transport: str = 'grpc', request_type=job_service.ListHyperparameterTuningJobsRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hyperparameter_tuning_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListHyperparameterTuningJobsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_hyperparameter_tuning_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListHyperparameterTuningJobsRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListHyperparameterTuningJobsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_hyperparameter_tuning_jobs_from_dict(): - test_list_hyperparameter_tuning_jobs(request_type=dict) - - -def test_list_hyperparameter_tuning_jobs_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hyperparameter_tuning_jobs), - '__call__') as call: - client.list_hyperparameter_tuning_jobs() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListHyperparameterTuningJobsRequest() - - -@pytest.mark.asyncio -async def test_list_hyperparameter_tuning_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListHyperparameterTuningJobsRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hyperparameter_tuning_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListHyperparameterTuningJobsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_hyperparameter_tuning_jobs(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListHyperparameterTuningJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListHyperparameterTuningJobsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_hyperparameter_tuning_jobs_async_from_dict(): - await test_list_hyperparameter_tuning_jobs_async(request_type=dict) - - -def test_list_hyperparameter_tuning_jobs_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.ListHyperparameterTuningJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hyperparameter_tuning_jobs), - '__call__') as call: - call.return_value = job_service.ListHyperparameterTuningJobsResponse() - client.list_hyperparameter_tuning_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_hyperparameter_tuning_jobs_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.ListHyperparameterTuningJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_hyperparameter_tuning_jobs), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListHyperparameterTuningJobsResponse()) - await client.list_hyperparameter_tuning_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_hyperparameter_tuning_jobs_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hyperparameter_tuning_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListHyperparameterTuningJobsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_hyperparameter_tuning_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_hyperparameter_tuning_jobs_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_hyperparameter_tuning_jobs( - job_service.ListHyperparameterTuningJobsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_hyperparameter_tuning_jobs_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hyperparameter_tuning_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListHyperparameterTuningJobsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListHyperparameterTuningJobsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_hyperparameter_tuning_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_hyperparameter_tuning_jobs_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_hyperparameter_tuning_jobs( - job_service.ListHyperparameterTuningJobsRequest(), - parent='parent_value', - ) - - -def test_list_hyperparameter_tuning_jobs_pager(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_hyperparameter_tuning_jobs), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[ - hyperparameter_tuning_job.HyperparameterTuningJob(), - hyperparameter_tuning_job.HyperparameterTuningJob(), - hyperparameter_tuning_job.HyperparameterTuningJob(), - ], - next_page_token='abc', - ), - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[], - next_page_token='def', - ), - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[ - hyperparameter_tuning_job.HyperparameterTuningJob(), - ], - next_page_token='ghi', - ), - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[ - hyperparameter_tuning_job.HyperparameterTuningJob(), - hyperparameter_tuning_job.HyperparameterTuningJob(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_hyperparameter_tuning_jobs(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, hyperparameter_tuning_job.HyperparameterTuningJob) - for i in results) - -def test_list_hyperparameter_tuning_jobs_pages(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hyperparameter_tuning_jobs), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[ - hyperparameter_tuning_job.HyperparameterTuningJob(), - hyperparameter_tuning_job.HyperparameterTuningJob(), - hyperparameter_tuning_job.HyperparameterTuningJob(), - ], - next_page_token='abc', - ), - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[], - next_page_token='def', - ), - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[ - hyperparameter_tuning_job.HyperparameterTuningJob(), - ], - next_page_token='ghi', - ), - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[ - hyperparameter_tuning_job.HyperparameterTuningJob(), - hyperparameter_tuning_job.HyperparameterTuningJob(), - ], - ), - RuntimeError, - ) - pages = list(client.list_hyperparameter_tuning_jobs(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_hyperparameter_tuning_jobs_async_pager(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hyperparameter_tuning_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[ - hyperparameter_tuning_job.HyperparameterTuningJob(), - hyperparameter_tuning_job.HyperparameterTuningJob(), - hyperparameter_tuning_job.HyperparameterTuningJob(), - ], - next_page_token='abc', - ), - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[], - next_page_token='def', - ), - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[ - hyperparameter_tuning_job.HyperparameterTuningJob(), - ], - next_page_token='ghi', - ), - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[ - hyperparameter_tuning_job.HyperparameterTuningJob(), - hyperparameter_tuning_job.HyperparameterTuningJob(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_hyperparameter_tuning_jobs(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, hyperparameter_tuning_job.HyperparameterTuningJob) - for i in responses) - -@pytest.mark.asyncio -async def test_list_hyperparameter_tuning_jobs_async_pages(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hyperparameter_tuning_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[ - hyperparameter_tuning_job.HyperparameterTuningJob(), - hyperparameter_tuning_job.HyperparameterTuningJob(), - hyperparameter_tuning_job.HyperparameterTuningJob(), - ], - next_page_token='abc', - ), - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[], - next_page_token='def', - ), - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[ - hyperparameter_tuning_job.HyperparameterTuningJob(), - ], - next_page_token='ghi', - ), - job_service.ListHyperparameterTuningJobsResponse( - hyperparameter_tuning_jobs=[ - hyperparameter_tuning_job.HyperparameterTuningJob(), - hyperparameter_tuning_job.HyperparameterTuningJob(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_hyperparameter_tuning_jobs(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_hyperparameter_tuning_job(transport: str = 'grpc', request_type=job_service.DeleteHyperparameterTuningJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteHyperparameterTuningJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_hyperparameter_tuning_job_from_dict(): - test_delete_hyperparameter_tuning_job(request_type=dict) - - -def test_delete_hyperparameter_tuning_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_hyperparameter_tuning_job), - '__call__') as call: - client.delete_hyperparameter_tuning_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteHyperparameterTuningJobRequest() - - -@pytest.mark.asyncio -async def test_delete_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteHyperparameterTuningJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteHyperparameterTuningJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_hyperparameter_tuning_job_async_from_dict(): - await test_delete_hyperparameter_tuning_job_async(request_type=dict) - - -def test_delete_hyperparameter_tuning_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteHyperparameterTuningJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_hyperparameter_tuning_job), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_hyperparameter_tuning_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteHyperparameterTuningJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_hyperparameter_tuning_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_hyperparameter_tuning_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_hyperparameter_tuning_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_hyperparameter_tuning_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.delete_hyperparameter_tuning_job( - job_service.DeleteHyperparameterTuningJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_hyperparameter_tuning_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_hyperparameter_tuning_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_hyperparameter_tuning_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.delete_hyperparameter_tuning_job( - job_service.DeleteHyperparameterTuningJobRequest(), - name='name_value', - ) - - -def test_cancel_hyperparameter_tuning_job(transport: str = 'grpc', request_type=job_service.CancelHyperparameterTuningJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.cancel_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelHyperparameterTuningJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_cancel_hyperparameter_tuning_job_from_dict(): - test_cancel_hyperparameter_tuning_job(request_type=dict) - - -def test_cancel_hyperparameter_tuning_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.cancel_hyperparameter_tuning_job), - '__call__') as call: - client.cancel_hyperparameter_tuning_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelHyperparameterTuningJobRequest() - - -@pytest.mark.asyncio -async def test_cancel_hyperparameter_tuning_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelHyperparameterTuningJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.cancel_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelHyperparameterTuningJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_cancel_hyperparameter_tuning_job_async_from_dict(): - await test_cancel_hyperparameter_tuning_job_async(request_type=dict) - - -def test_cancel_hyperparameter_tuning_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = job_service.CancelHyperparameterTuningJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_hyperparameter_tuning_job), - '__call__') as call: - call.return_value = None - client.cancel_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_cancel_hyperparameter_tuning_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CancelHyperparameterTuningJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_hyperparameter_tuning_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.cancel_hyperparameter_tuning_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_cancel_hyperparameter_tuning_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.cancel_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.cancel_hyperparameter_tuning_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_cancel_hyperparameter_tuning_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.cancel_hyperparameter_tuning_job( - job_service.CancelHyperparameterTuningJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_cancel_hyperparameter_tuning_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_hyperparameter_tuning_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.cancel_hyperparameter_tuning_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_cancel_hyperparameter_tuning_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.cancel_hyperparameter_tuning_job( - job_service.CancelHyperparameterTuningJobRequest(), - name='name_value', - ) - - -def test_create_batch_prediction_job(transport: str = 'grpc', request_type=job_service.CreateBatchPredictionJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_batch_prediction_job.BatchPredictionJob( - name='name_value', - display_name='display_name_value', - model='model_value', - generate_explanation=True, - state=job_state.JobState.JOB_STATE_QUEUED, - ) - response = client.create_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateBatchPredictionJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_batch_prediction_job.BatchPredictionJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.model == 'model_value' - assert response.generate_explanation is True - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -def test_create_batch_prediction_job_from_dict(): - test_create_batch_prediction_job(request_type=dict) - - -def test_create_batch_prediction_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_batch_prediction_job), - '__call__') as call: - client.create_batch_prediction_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateBatchPredictionJobRequest() - - -@pytest.mark.asyncio -async def test_create_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateBatchPredictionJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_batch_prediction_job.BatchPredictionJob( - name='name_value', - display_name='display_name_value', - model='model_value', - generate_explanation=True, - state=job_state.JobState.JOB_STATE_QUEUED, - )) - response = await client.create_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateBatchPredictionJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_batch_prediction_job.BatchPredictionJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.model == 'model_value' - assert response.generate_explanation is True - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -@pytest.mark.asyncio -async def test_create_batch_prediction_job_async_from_dict(): - await test_create_batch_prediction_job_async(request_type=dict) - - -def test_create_batch_prediction_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CreateBatchPredictionJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_batch_prediction_job), - '__call__') as call: - call.return_value = gca_batch_prediction_job.BatchPredictionJob() - client.create_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_batch_prediction_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CreateBatchPredictionJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_batch_prediction_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_batch_prediction_job.BatchPredictionJob()) - await client.create_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_batch_prediction_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_batch_prediction_job.BatchPredictionJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_batch_prediction_job( - parent='parent_value', - batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].batch_prediction_job - mock_val = gca_batch_prediction_job.BatchPredictionJob(name='name_value') - assert arg == mock_val - - -def test_create_batch_prediction_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_batch_prediction_job( - job_service.CreateBatchPredictionJobRequest(), - parent='parent_value', - batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_batch_prediction_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_batch_prediction_job.BatchPredictionJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_batch_prediction_job.BatchPredictionJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_batch_prediction_job( - parent='parent_value', - batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].batch_prediction_job - mock_val = gca_batch_prediction_job.BatchPredictionJob(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_batch_prediction_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_batch_prediction_job( - job_service.CreateBatchPredictionJobRequest(), - parent='parent_value', - batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(name='name_value'), - ) - - -def test_get_batch_prediction_job(transport: str = 'grpc', request_type=job_service.GetBatchPredictionJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = batch_prediction_job.BatchPredictionJob( - name='name_value', - display_name='display_name_value', - model='model_value', - generate_explanation=True, - state=job_state.JobState.JOB_STATE_QUEUED, - ) - response = client.get_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetBatchPredictionJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, batch_prediction_job.BatchPredictionJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.model == 'model_value' - assert response.generate_explanation is True - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -def test_get_batch_prediction_job_from_dict(): - test_get_batch_prediction_job(request_type=dict) - - -def test_get_batch_prediction_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_batch_prediction_job), - '__call__') as call: - client.get_batch_prediction_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetBatchPredictionJobRequest() - - -@pytest.mark.asyncio -async def test_get_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetBatchPredictionJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(batch_prediction_job.BatchPredictionJob( - name='name_value', - display_name='display_name_value', - model='model_value', - generate_explanation=True, - state=job_state.JobState.JOB_STATE_QUEUED, - )) - response = await client.get_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetBatchPredictionJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, batch_prediction_job.BatchPredictionJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.model == 'model_value' - assert response.generate_explanation is True - assert response.state == job_state.JobState.JOB_STATE_QUEUED - - -@pytest.mark.asyncio -async def test_get_batch_prediction_job_async_from_dict(): - await test_get_batch_prediction_job_async(request_type=dict) - - -def test_get_batch_prediction_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetBatchPredictionJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_batch_prediction_job), - '__call__') as call: - call.return_value = batch_prediction_job.BatchPredictionJob() - client.get_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_batch_prediction_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetBatchPredictionJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_batch_prediction_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(batch_prediction_job.BatchPredictionJob()) - await client.get_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_batch_prediction_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = batch_prediction_job.BatchPredictionJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_batch_prediction_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_batch_prediction_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_batch_prediction_job( - job_service.GetBatchPredictionJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_batch_prediction_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = batch_prediction_job.BatchPredictionJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(batch_prediction_job.BatchPredictionJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_batch_prediction_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_batch_prediction_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_batch_prediction_job( - job_service.GetBatchPredictionJobRequest(), - name='name_value', - ) - - -def test_list_batch_prediction_jobs(transport: str = 'grpc', request_type=job_service.ListBatchPredictionJobsRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_batch_prediction_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListBatchPredictionJobsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_batch_prediction_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListBatchPredictionJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListBatchPredictionJobsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_batch_prediction_jobs_from_dict(): - test_list_batch_prediction_jobs(request_type=dict) - - -def test_list_batch_prediction_jobs_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_batch_prediction_jobs), - '__call__') as call: - client.list_batch_prediction_jobs() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListBatchPredictionJobsRequest() - - -@pytest.mark.asyncio -async def test_list_batch_prediction_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListBatchPredictionJobsRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_batch_prediction_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListBatchPredictionJobsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_batch_prediction_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListBatchPredictionJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListBatchPredictionJobsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_batch_prediction_jobs_async_from_dict(): - await test_list_batch_prediction_jobs_async(request_type=dict) - - -def test_list_batch_prediction_jobs_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = job_service.ListBatchPredictionJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_batch_prediction_jobs), - '__call__') as call: - call.return_value = job_service.ListBatchPredictionJobsResponse() - client.list_batch_prediction_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_batch_prediction_jobs_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.ListBatchPredictionJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_batch_prediction_jobs), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListBatchPredictionJobsResponse()) - await client.list_batch_prediction_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_batch_prediction_jobs_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_batch_prediction_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListBatchPredictionJobsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_batch_prediction_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_batch_prediction_jobs_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_batch_prediction_jobs( - job_service.ListBatchPredictionJobsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_batch_prediction_jobs_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_batch_prediction_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListBatchPredictionJobsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListBatchPredictionJobsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_batch_prediction_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_batch_prediction_jobs_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_batch_prediction_jobs( - job_service.ListBatchPredictionJobsRequest(), - parent='parent_value', - ) - - -def test_list_batch_prediction_jobs_pager(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_batch_prediction_jobs), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - ], - next_page_token='abc', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[], - next_page_token='def', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - ], - next_page_token='ghi', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_batch_prediction_jobs(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, batch_prediction_job.BatchPredictionJob) - for i 
in results) - -def test_list_batch_prediction_jobs_pages(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_batch_prediction_jobs), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - ], - next_page_token='abc', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[], - next_page_token='def', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - ], - next_page_token='ghi', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - ], - ), - RuntimeError, - ) - pages = list(client.list_batch_prediction_jobs(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_batch_prediction_jobs_async_pager(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_batch_prediction_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - ], - next_page_token='abc', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[], - next_page_token='def', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - ], - next_page_token='ghi', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_batch_prediction_jobs(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, batch_prediction_job.BatchPredictionJob) - for i in responses) - -@pytest.mark.asyncio -async def test_list_batch_prediction_jobs_async_pages(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_batch_prediction_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - ], - next_page_token='abc', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[], - next_page_token='def', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - ], - next_page_token='ghi', - ), - job_service.ListBatchPredictionJobsResponse( - batch_prediction_jobs=[ - batch_prediction_job.BatchPredictionJob(), - batch_prediction_job.BatchPredictionJob(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_batch_prediction_jobs(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_batch_prediction_job(transport: str = 'grpc', request_type=job_service.DeleteBatchPredictionJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteBatchPredictionJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_delete_batch_prediction_job_from_dict(): - test_delete_batch_prediction_job(request_type=dict) - - -def test_delete_batch_prediction_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_batch_prediction_job), - '__call__') as call: - client.delete_batch_prediction_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteBatchPredictionJobRequest() - - -@pytest.mark.asyncio -async def test_delete_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteBatchPredictionJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteBatchPredictionJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_batch_prediction_job_async_from_dict(): - await test_delete_batch_prediction_job_async(request_type=dict) - - -def test_delete_batch_prediction_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteBatchPredictionJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_batch_prediction_job), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_batch_prediction_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteBatchPredictionJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_batch_prediction_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_batch_prediction_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_batch_prediction_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_batch_prediction_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_batch_prediction_job( - job_service.DeleteBatchPredictionJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_batch_prediction_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_batch_prediction_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_batch_prediction_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_batch_prediction_job( - job_service.DeleteBatchPredictionJobRequest(), - name='name_value', - ) - - -def test_cancel_batch_prediction_job(transport: str = 'grpc', request_type=job_service.CancelBatchPredictionJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.cancel_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelBatchPredictionJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_cancel_batch_prediction_job_from_dict(): - test_cancel_batch_prediction_job(request_type=dict) - - -def test_cancel_batch_prediction_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_batch_prediction_job), - '__call__') as call: - client.cancel_batch_prediction_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelBatchPredictionJobRequest() - - -@pytest.mark.asyncio -async def test_cancel_batch_prediction_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CancelBatchPredictionJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.cancel_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CancelBatchPredictionJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_cancel_batch_prediction_job_async_from_dict(): - await test_cancel_batch_prediction_job_async(request_type=dict) - - -def test_cancel_batch_prediction_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CancelBatchPredictionJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_batch_prediction_job), - '__call__') as call: - call.return_value = None - client.cancel_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_cancel_batch_prediction_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CancelBatchPredictionJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.cancel_batch_prediction_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.cancel_batch_prediction_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_cancel_batch_prediction_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.cancel_batch_prediction_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_cancel_batch_prediction_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.cancel_batch_prediction_job( - job_service.CancelBatchPredictionJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_cancel_batch_prediction_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.cancel_batch_prediction_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.cancel_batch_prediction_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_cancel_batch_prediction_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.cancel_batch_prediction_job( - job_service.CancelBatchPredictionJobRequest(), - name='name_value', - ) - - -def test_create_model_deployment_monitoring_job(transport: str = 'grpc', request_type=job_service.CreateModelDeploymentMonitoringJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob( - name='name_value', - display_name='display_name_value', - endpoint='endpoint_value', - state=job_state.JobState.JOB_STATE_QUEUED, - schedule_state=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, - predict_instance_schema_uri='predict_instance_schema_uri_value', - analysis_instance_schema_uri='analysis_instance_schema_uri_value', - enable_monitoring_pipeline_logs=True, - ) - response = client.create_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.endpoint == 'endpoint_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - assert response.schedule_state == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING - assert response.predict_instance_schema_uri == 'predict_instance_schema_uri_value' - assert response.analysis_instance_schema_uri == 'analysis_instance_schema_uri_value' - assert response.enable_monitoring_pipeline_logs is True - - -def test_create_model_deployment_monitoring_job_from_dict(): - test_create_model_deployment_monitoring_job(request_type=dict) - - -def test_create_model_deployment_monitoring_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model_deployment_monitoring_job), - '__call__') as call: - client.create_model_deployment_monitoring_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest() - - -@pytest.mark.asyncio -async def test_create_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.CreateModelDeploymentMonitoringJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob( - name='name_value', - display_name='display_name_value', - endpoint='endpoint_value', - state=job_state.JobState.JOB_STATE_QUEUED, - schedule_state=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, - predict_instance_schema_uri='predict_instance_schema_uri_value', - analysis_instance_schema_uri='analysis_instance_schema_uri_value', - enable_monitoring_pipeline_logs=True, - )) - response = await client.create_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.endpoint == 'endpoint_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - assert response.schedule_state == gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING - assert response.predict_instance_schema_uri == 'predict_instance_schema_uri_value' - assert response.analysis_instance_schema_uri == 'analysis_instance_schema_uri_value' - assert response.enable_monitoring_pipeline_logs is True - - -@pytest.mark.asyncio -async def test_create_model_deployment_monitoring_job_async_from_dict(): - await test_create_model_deployment_monitoring_job_async(request_type=dict) - - -def test_create_model_deployment_monitoring_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CreateModelDeploymentMonitoringJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob() - client.create_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_model_deployment_monitoring_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.CreateModelDeploymentMonitoringJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()) - await client.create_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_model_deployment_monitoring_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.create_model_deployment_monitoring_job( - parent='parent_value', - model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].model_deployment_monitoring_job - mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value') - assert arg == mock_val - - -def test_create_model_deployment_monitoring_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_model_deployment_monitoring_job( - job_service.CreateModelDeploymentMonitoringJobRequest(), - parent='parent_value', - model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_model_deployment_monitoring_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.create_model_deployment_monitoring_job( - parent='parent_value', - model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].model_deployment_monitoring_job - mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_model_deployment_monitoring_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_model_deployment_monitoring_job( - job_service.CreateModelDeploymentMonitoringJobRequest(), - parent='parent_value', - model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), - ) - - -def test_search_model_deployment_monitoring_stats_anomalies(transport: str = 'grpc', request_type=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - next_page_token='next_page_token_value', - ) - response = client.search_model_deployment_monitoring_stats_anomalies(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_search_model_deployment_monitoring_stats_anomalies_from_dict(): - test_search_model_deployment_monitoring_stats_anomalies(request_type=dict) - - -def test_search_model_deployment_monitoring_stats_anomalies_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__') as call: - client.search_model_deployment_monitoring_stats_anomalies() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() - - -@pytest.mark.asyncio -async def test_search_model_deployment_monitoring_stats_anomalies_async(transport: str = 'grpc_asyncio', request_type=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - next_page_token='next_page_token_value', - )) - response = await client.search_model_deployment_monitoring_stats_anomalies(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_search_model_deployment_monitoring_stats_anomalies_async_from_dict(): - await test_search_model_deployment_monitoring_stats_anomalies_async(request_type=dict) - - -def test_search_model_deployment_monitoring_stats_anomalies_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() - - request.model_deployment_monitoring_job = 'model_deployment_monitoring_job/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__') as call: - call.return_value = job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse() - client.search_model_deployment_monitoring_stats_anomalies(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'model_deployment_monitoring_job=model_deployment_monitoring_job/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_search_model_deployment_monitoring_stats_anomalies_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest() - - request.model_deployment_monitoring_job = 'model_deployment_monitoring_job/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()) - await client.search_model_deployment_monitoring_stats_anomalies(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'model_deployment_monitoring_job=model_deployment_monitoring_job/value', - ) in kw['metadata'] - - -def test_search_model_deployment_monitoring_stats_anomalies_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.search_model_deployment_monitoring_stats_anomalies( - model_deployment_monitoring_job='model_deployment_monitoring_job_value', - deployed_model_id='deployed_model_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].model_deployment_monitoring_job - mock_val = 'model_deployment_monitoring_job_value' - assert arg == mock_val - arg = args[0].deployed_model_id - mock_val = 'deployed_model_id_value' - assert arg == mock_val - - -def test_search_model_deployment_monitoring_stats_anomalies_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.search_model_deployment_monitoring_stats_anomalies( - job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(), - model_deployment_monitoring_job='model_deployment_monitoring_job_value', - deployed_model_id='deployed_model_id_value', - ) - - -@pytest.mark.asyncio -async def test_search_model_deployment_monitoring_stats_anomalies_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.search_model_deployment_monitoring_stats_anomalies( - model_deployment_monitoring_job='model_deployment_monitoring_job_value', - deployed_model_id='deployed_model_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].model_deployment_monitoring_job - mock_val = 'model_deployment_monitoring_job_value' - assert arg == mock_val - arg = args[0].deployed_model_id - mock_val = 'deployed_model_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_search_model_deployment_monitoring_stats_anomalies_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.search_model_deployment_monitoring_stats_anomalies( - job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(), - model_deployment_monitoring_job='model_deployment_monitoring_job_value', - deployed_model_id='deployed_model_id_value', - ) - - -def test_search_model_deployment_monitoring_stats_anomalies_pager(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - next_page_token='abc', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[], - next_page_token='def', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - next_page_token='ghi', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('model_deployment_monitoring_job', ''), - )), - ) - pager = client.search_model_deployment_monitoring_stats_anomalies(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies) - for i in results) - -def test_search_model_deployment_monitoring_stats_anomalies_pages(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - next_page_token='abc', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[], - next_page_token='def', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - next_page_token='ghi', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - ), - RuntimeError, - ) - pages = list(client.search_model_deployment_monitoring_stats_anomalies(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_search_model_deployment_monitoring_stats_anomalies_async_pager(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - next_page_token='abc', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[], - next_page_token='def', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - next_page_token='ghi', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - ), - RuntimeError, - ) - async_pager = await client.search_model_deployment_monitoring_stats_anomalies(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies) - for i in responses) - -@pytest.mark.asyncio -async def test_search_model_deployment_monitoring_stats_anomalies_async_pages(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_model_deployment_monitoring_stats_anomalies), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - next_page_token='abc', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[], - next_page_token='def', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - next_page_token='ghi', - ), - job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse( - monitoring_stats=[ - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.search_model_deployment_monitoring_stats_anomalies(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_get_model_deployment_monitoring_job(transport: str = 'grpc', request_type=job_service.GetModelDeploymentMonitoringJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob( - name='name_value', - display_name='display_name_value', - endpoint='endpoint_value', - state=job_state.JobState.JOB_STATE_QUEUED, - schedule_state=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, - predict_instance_schema_uri='predict_instance_schema_uri_value', - analysis_instance_schema_uri='analysis_instance_schema_uri_value', - enable_monitoring_pipeline_logs=True, - ) - response = client.get_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, model_deployment_monitoring_job.ModelDeploymentMonitoringJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.endpoint == 'endpoint_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - assert response.schedule_state == model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING - assert response.predict_instance_schema_uri == 'predict_instance_schema_uri_value' - assert response.analysis_instance_schema_uri == 'analysis_instance_schema_uri_value' - assert response.enable_monitoring_pipeline_logs is True - - -def test_get_model_deployment_monitoring_job_from_dict(): - test_get_model_deployment_monitoring_job(request_type=dict) - - -def test_get_model_deployment_monitoring_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_deployment_monitoring_job), - '__call__') as call: - client.get_model_deployment_monitoring_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest() - - -@pytest.mark.asyncio -async def test_get_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.GetModelDeploymentMonitoringJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_deployment_monitoring_job.ModelDeploymentMonitoringJob( - name='name_value', - display_name='display_name_value', - endpoint='endpoint_value', - state=job_state.JobState.JOB_STATE_QUEUED, - schedule_state=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING, - predict_instance_schema_uri='predict_instance_schema_uri_value', - analysis_instance_schema_uri='analysis_instance_schema_uri_value', - enable_monitoring_pipeline_logs=True, - )) - response = await client.get_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, model_deployment_monitoring_job.ModelDeploymentMonitoringJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.endpoint == 'endpoint_value' - assert response.state == job_state.JobState.JOB_STATE_QUEUED - assert response.schedule_state == model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING - assert response.predict_instance_schema_uri == 'predict_instance_schema_uri_value' - assert response.analysis_instance_schema_uri == 'analysis_instance_schema_uri_value' - assert response.enable_monitoring_pipeline_logs is True - - -@pytest.mark.asyncio -async def test_get_model_deployment_monitoring_job_async_from_dict(): - await test_get_model_deployment_monitoring_job_async(request_type=dict) - - -def test_get_model_deployment_monitoring_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetModelDeploymentMonitoringJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob() - client.get_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_model_deployment_monitoring_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.GetModelDeploymentMonitoringJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_deployment_monitoring_job.ModelDeploymentMonitoringJob()) - await client.get_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_model_deployment_monitoring_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_model_deployment_monitoring_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_model_deployment_monitoring_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_model_deployment_monitoring_job( - job_service.GetModelDeploymentMonitoringJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_model_deployment_monitoring_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_deployment_monitoring_job.ModelDeploymentMonitoringJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_model_deployment_monitoring_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_model_deployment_monitoring_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_model_deployment_monitoring_job( - job_service.GetModelDeploymentMonitoringJobRequest(), - name='name_value', - ) - - -def test_list_model_deployment_monitoring_jobs(transport: str = 'grpc', request_type=job_service.ListModelDeploymentMonitoringJobsRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_deployment_monitoring_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_model_deployment_monitoring_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelDeploymentMonitoringJobsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_model_deployment_monitoring_jobs_from_dict(): - test_list_model_deployment_monitoring_jobs(request_type=dict) - - -def test_list_model_deployment_monitoring_jobs_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_model_deployment_monitoring_jobs), - '__call__') as call: - client.list_model_deployment_monitoring_jobs() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest() - - -@pytest.mark.asyncio -async def test_list_model_deployment_monitoring_jobs_async(transport: str = 'grpc_asyncio', request_type=job_service.ListModelDeploymentMonitoringJobsRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_deployment_monitoring_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListModelDeploymentMonitoringJobsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_model_deployment_monitoring_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListModelDeploymentMonitoringJobsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_model_deployment_monitoring_jobs_async_from_dict(): - await test_list_model_deployment_monitoring_jobs_async(request_type=dict) - - -def test_list_model_deployment_monitoring_jobs_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.ListModelDeploymentMonitoringJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_deployment_monitoring_jobs), - '__call__') as call: - call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse() - client.list_model_deployment_monitoring_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_model_deployment_monitoring_jobs_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.ListModelDeploymentMonitoringJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_model_deployment_monitoring_jobs), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListModelDeploymentMonitoringJobsResponse()) - await client.list_model_deployment_monitoring_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_model_deployment_monitoring_jobs_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_deployment_monitoring_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_model_deployment_monitoring_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_model_deployment_monitoring_jobs_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_model_deployment_monitoring_jobs( - job_service.ListModelDeploymentMonitoringJobsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_model_deployment_monitoring_jobs_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_deployment_monitoring_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(job_service.ListModelDeploymentMonitoringJobsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_model_deployment_monitoring_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_model_deployment_monitoring_jobs_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_model_deployment_monitoring_jobs( - job_service.ListModelDeploymentMonitoringJobsRequest(), - parent='parent_value', - ) - - -def test_list_model_deployment_monitoring_jobs_pager(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_model_deployment_monitoring_jobs), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - ], - next_page_token='abc', - ), - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[], - next_page_token='def', - ), - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - ], - next_page_token='ghi', - ), - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_model_deployment_monitoring_jobs(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, model_deployment_monitoring_job.ModelDeploymentMonitoringJob) - for i in results) - -def test_list_model_deployment_monitoring_jobs_pages(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_deployment_monitoring_jobs), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - ], - next_page_token='abc', - ), - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[], - next_page_token='def', - ), - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - ], - next_page_token='ghi', - ), - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - ], - ), - RuntimeError, - ) - pages = list(client.list_model_deployment_monitoring_jobs(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_model_deployment_monitoring_jobs_async_pager(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_deployment_monitoring_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - ], - next_page_token='abc', - ), - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[], - next_page_token='def', - ), - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - ], - next_page_token='ghi', - ), - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_model_deployment_monitoring_jobs(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, model_deployment_monitoring_job.ModelDeploymentMonitoringJob) - for i in responses) - -@pytest.mark.asyncio -async def test_list_model_deployment_monitoring_jobs_async_pages(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_deployment_monitoring_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - ], - next_page_token='abc', - ), - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[], - next_page_token='def', - ), - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - ], - next_page_token='ghi', - ), - job_service.ListModelDeploymentMonitoringJobsResponse( - model_deployment_monitoring_jobs=[ - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - model_deployment_monitoring_job.ModelDeploymentMonitoringJob(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_model_deployment_monitoring_jobs(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_model_deployment_monitoring_job(transport: str = 'grpc', request_type=job_service.UpdateModelDeploymentMonitoringJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.update_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_update_model_deployment_monitoring_job_from_dict(): - test_update_model_deployment_monitoring_job(request_type=dict) - - -def test_update_model_deployment_monitoring_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model_deployment_monitoring_job), - '__call__') as call: - client.update_model_deployment_monitoring_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest() - - -@pytest.mark.asyncio -async def test_update_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.UpdateModelDeploymentMonitoringJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.update_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_update_model_deployment_monitoring_job_async_from_dict(): - await test_update_model_deployment_monitoring_job_async(request_type=dict) - - -def test_update_model_deployment_monitoring_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.UpdateModelDeploymentMonitoringJobRequest() - - request.model_deployment_monitoring_job.name = 'model_deployment_monitoring_job.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.update_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'model_deployment_monitoring_job.name=model_deployment_monitoring_job.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_model_deployment_monitoring_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.UpdateModelDeploymentMonitoringJobRequest() - - request.model_deployment_monitoring_job.name = 'model_deployment_monitoring_job.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.update_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'model_deployment_monitoring_job.name=model_deployment_monitoring_job.name/value', - ) in kw['metadata'] - - -def test_update_model_deployment_monitoring_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.update_model_deployment_monitoring_job( - model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].model_deployment_monitoring_job - mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_model_deployment_monitoring_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_model_deployment_monitoring_job( - job_service.UpdateModelDeploymentMonitoringJobRequest(), - model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_model_deployment_monitoring_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.update_model_deployment_monitoring_job( - model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].model_deployment_monitoring_job - mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_model_deployment_monitoring_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_model_deployment_monitoring_job( - job_service.UpdateModelDeploymentMonitoringJobRequest(), - model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_model_deployment_monitoring_job(transport: str = 'grpc', request_type=job_service.DeleteModelDeploymentMonitoringJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_model_deployment_monitoring_job_from_dict(): - test_delete_model_deployment_monitoring_job(request_type=dict) - - -def test_delete_model_deployment_monitoring_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model_deployment_monitoring_job), - '__call__') as call: - client.delete_model_deployment_monitoring_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest() - - -@pytest.mark.asyncio -async def test_delete_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.DeleteModelDeploymentMonitoringJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_model_deployment_monitoring_job_async_from_dict(): - await test_delete_model_deployment_monitoring_job_async(request_type=dict) - - -def test_delete_model_deployment_monitoring_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.DeleteModelDeploymentMonitoringJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_model_deployment_monitoring_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. 
Set these to a non-empty value. - request = job_service.DeleteModelDeploymentMonitoringJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_model_deployment_monitoring_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_model_deployment_monitoring_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_model_deployment_monitoring_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.delete_model_deployment_monitoring_job( - job_service.DeleteModelDeploymentMonitoringJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_model_deployment_monitoring_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_model_deployment_monitoring_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_model_deployment_monitoring_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.delete_model_deployment_monitoring_job( - job_service.DeleteModelDeploymentMonitoringJobRequest(), - name='name_value', - ) - - -def test_pause_model_deployment_monitoring_job(transport: str = 'grpc', request_type=job_service.PauseModelDeploymentMonitoringJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.pause_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.pause_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_pause_model_deployment_monitoring_job_from_dict(): - test_pause_model_deployment_monitoring_job(request_type=dict) - - -def test_pause_model_deployment_monitoring_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.pause_model_deployment_monitoring_job), - '__call__') as call: - client.pause_model_deployment_monitoring_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest() - - -@pytest.mark.asyncio -async def test_pause_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.PauseModelDeploymentMonitoringJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.pause_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.pause_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_pause_model_deployment_monitoring_job_async_from_dict(): - await test_pause_model_deployment_monitoring_job_async(request_type=dict) - - -def test_pause_model_deployment_monitoring_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = job_service.PauseModelDeploymentMonitoringJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.pause_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = None - client.pause_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_pause_model_deployment_monitoring_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.PauseModelDeploymentMonitoringJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.pause_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.pause_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_pause_model_deployment_monitoring_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.pause_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.pause_model_deployment_monitoring_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_pause_model_deployment_monitoring_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.pause_model_deployment_monitoring_job( - job_service.PauseModelDeploymentMonitoringJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_pause_model_deployment_monitoring_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.pause_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.pause_model_deployment_monitoring_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_pause_model_deployment_monitoring_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.pause_model_deployment_monitoring_job( - job_service.PauseModelDeploymentMonitoringJobRequest(), - name='name_value', - ) - - -def test_resume_model_deployment_monitoring_job(transport: str = 'grpc', request_type=job_service.ResumeModelDeploymentMonitoringJobRequest): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.resume_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.resume_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_resume_model_deployment_monitoring_job_from_dict(): - test_resume_model_deployment_monitoring_job(request_type=dict) - - -def test_resume_model_deployment_monitoring_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.resume_model_deployment_monitoring_job), - '__call__') as call: - client.resume_model_deployment_monitoring_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest() - - -@pytest.mark.asyncio -async def test_resume_model_deployment_monitoring_job_async(transport: str = 'grpc_asyncio', request_type=job_service.ResumeModelDeploymentMonitoringJobRequest): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.resume_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.resume_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest() - - # Establish that the response is the type that we expect. 
- assert response is None - - -@pytest.mark.asyncio -async def test_resume_model_deployment_monitoring_job_async_from_dict(): - await test_resume_model_deployment_monitoring_job_async(request_type=dict) - - -def test_resume_model_deployment_monitoring_job_field_headers(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.ResumeModelDeploymentMonitoringJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.resume_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = None - client.resume_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_resume_model_deployment_monitoring_job_field_headers_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = job_service.ResumeModelDeploymentMonitoringJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.resume_model_deployment_monitoring_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.resume_model_deployment_monitoring_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_resume_model_deployment_monitoring_job_flattened(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.resume_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.resume_model_deployment_monitoring_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_resume_model_deployment_monitoring_job_flattened_error(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.resume_model_deployment_monitoring_job( - job_service.ResumeModelDeploymentMonitoringJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_resume_model_deployment_monitoring_job_flattened_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.resume_model_deployment_monitoring_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.resume_model_deployment_monitoring_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_resume_model_deployment_monitoring_job_flattened_error_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.resume_model_deployment_monitoring_job( - job_service.ResumeModelDeploymentMonitoringJobRequest(), - name='name_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.JobServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.JobServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = JobServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. 
- transport = transports.JobServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = JobServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.JobServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = JobServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.JobServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.JobServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.JobServiceGrpcTransport, - transports.JobServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. 
- client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.JobServiceGrpcTransport, - ) - -def test_job_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.JobServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_job_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1beta1.services.job_service.transports.JobServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.JobServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'create_custom_job', - 'get_custom_job', - 'list_custom_jobs', - 'delete_custom_job', - 'cancel_custom_job', - 'create_data_labeling_job', - 'get_data_labeling_job', - 'list_data_labeling_jobs', - 'delete_data_labeling_job', - 'cancel_data_labeling_job', - 'create_hyperparameter_tuning_job', - 'get_hyperparameter_tuning_job', - 'list_hyperparameter_tuning_jobs', - 'delete_hyperparameter_tuning_job', - 'cancel_hyperparameter_tuning_job', - 'create_batch_prediction_job', - 'get_batch_prediction_job', - 'list_batch_prediction_jobs', - 'delete_batch_prediction_job', - 'cancel_batch_prediction_job', - 'create_model_deployment_monitoring_job', - 'search_model_deployment_monitoring_stats_anomalies', - 'get_model_deployment_monitoring_job', - 'list_model_deployment_monitoring_jobs', - 'update_model_deployment_monitoring_job', - 'delete_model_deployment_monitoring_job', - 'pause_model_deployment_monitoring_job', - 'resume_model_deployment_monitoring_job', - ) - for method in methods: - with pytest.raises(NotImplementedError): - 
getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -def test_job_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.JobServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_job_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.JobServiceTransport() - adc.assert_called_once() - - -def test_job_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - JobServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.JobServiceGrpcTransport, - transports.JobServiceGrpcAsyncIOTransport, - ], -) -def test_job_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.JobServiceGrpcTransport, grpc_helpers), - (transports.JobServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_job_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport]) -def test_job_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. 
- with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_job_service_host_no_port(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_job_service_host_with_port(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_job_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.JobServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_job_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.JobServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport]) -def test_job_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio 
transport constructor. -@pytest.mark.parametrize("transport_class", [transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport]) -def test_job_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_job_service_grpc_lro_client(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_job_service_grpc_lro_async_client(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_batch_prediction_job_path(): - project = "squid" - location = "clam" - batch_prediction_job = "whelk" - expected = "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format(project=project, location=location, batch_prediction_job=batch_prediction_job, ) - actual = JobServiceClient.batch_prediction_job_path(project, location, batch_prediction_job) - assert expected == actual - - -def test_parse_batch_prediction_job_path(): - expected = { - "project": "octopus", - "location": "oyster", - "batch_prediction_job": "nudibranch", - } - path = JobServiceClient.batch_prediction_job_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_batch_prediction_job_path(path) - assert expected == actual - -def test_custom_job_path(): - project = "cuttlefish" - location = "mussel" - custom_job = "winkle" - expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) - actual = JobServiceClient.custom_job_path(project, location, custom_job) - assert expected == actual - - -def test_parse_custom_job_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "custom_job": "abalone", - } - path = JobServiceClient.custom_job_path(**expected) - - # Check that the path construction is reversible. 
- actual = JobServiceClient.parse_custom_job_path(path) - assert expected == actual - -def test_data_labeling_job_path(): - project = "squid" - location = "clam" - data_labeling_job = "whelk" - expected = "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format(project=project, location=location, data_labeling_job=data_labeling_job, ) - actual = JobServiceClient.data_labeling_job_path(project, location, data_labeling_job) - assert expected == actual - - -def test_parse_data_labeling_job_path(): - expected = { - "project": "octopus", - "location": "oyster", - "data_labeling_job": "nudibranch", - } - path = JobServiceClient.data_labeling_job_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_data_labeling_job_path(path) - assert expected == actual - -def test_dataset_path(): - project = "cuttlefish" - location = "mussel" - dataset = "winkle" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - actual = JobServiceClient.dataset_path(project, location, dataset) - assert expected == actual - - -def test_parse_dataset_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "dataset": "abalone", - } - path = JobServiceClient.dataset_path(**expected) - - # Check that the path construction is reversible. 
- actual = JobServiceClient.parse_dataset_path(path) - assert expected == actual - -def test_endpoint_path(): - project = "squid" - location = "clam" - endpoint = "whelk" - expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - actual = JobServiceClient.endpoint_path(project, location, endpoint) - assert expected == actual - - -def test_parse_endpoint_path(): - expected = { - "project": "octopus", - "location": "oyster", - "endpoint": "nudibranch", - } - path = JobServiceClient.endpoint_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_endpoint_path(path) - assert expected == actual - -def test_hyperparameter_tuning_job_path(): - project = "cuttlefish" - location = "mussel" - hyperparameter_tuning_job = "winkle" - expected = "projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}".format(project=project, location=location, hyperparameter_tuning_job=hyperparameter_tuning_job, ) - actual = JobServiceClient.hyperparameter_tuning_job_path(project, location, hyperparameter_tuning_job) - assert expected == actual - - -def test_parse_hyperparameter_tuning_job_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "hyperparameter_tuning_job": "abalone", - } - path = JobServiceClient.hyperparameter_tuning_job_path(**expected) - - # Check that the path construction is reversible. 
- actual = JobServiceClient.parse_hyperparameter_tuning_job_path(path) - assert expected == actual - -def test_model_path(): - project = "squid" - location = "clam" - model = "whelk" - expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - actual = JobServiceClient.model_path(project, location, model) - assert expected == actual - - -def test_parse_model_path(): - expected = { - "project": "octopus", - "location": "oyster", - "model": "nudibranch", - } - path = JobServiceClient.model_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_model_path(path) - assert expected == actual - -def test_model_deployment_monitoring_job_path(): - project = "cuttlefish" - location = "mussel" - model_deployment_monitoring_job = "winkle" - expected = "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format(project=project, location=location, model_deployment_monitoring_job=model_deployment_monitoring_job, ) - actual = JobServiceClient.model_deployment_monitoring_job_path(project, location, model_deployment_monitoring_job) - assert expected == actual - - -def test_parse_model_deployment_monitoring_job_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "model_deployment_monitoring_job": "abalone", - } - path = JobServiceClient.model_deployment_monitoring_job_path(**expected) - - # Check that the path construction is reversible. 
- actual = JobServiceClient.parse_model_deployment_monitoring_job_path(path) - assert expected == actual - -def test_network_path(): - project = "squid" - network = "clam" - expected = "projects/{project}/global/networks/{network}".format(project=project, network=network, ) - actual = JobServiceClient.network_path(project, network) - assert expected == actual - - -def test_parse_network_path(): - expected = { - "project": "whelk", - "network": "octopus", - } - path = JobServiceClient.network_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_network_path(path) - assert expected == actual - -def test_tensorboard_path(): - project = "oyster" - location = "nudibranch" - tensorboard = "cuttlefish" - expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(project=project, location=location, tensorboard=tensorboard, ) - actual = JobServiceClient.tensorboard_path(project, location, tensorboard) - assert expected == actual - - -def test_parse_tensorboard_path(): - expected = { - "project": "mussel", - "location": "winkle", - "tensorboard": "nautilus", - } - path = JobServiceClient.tensorboard_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_tensorboard_path(path) - assert expected == actual - -def test_trial_path(): - project = "scallop" - location = "abalone" - study = "squid" - trial = "clam" - expected = "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, ) - actual = JobServiceClient.trial_path(project, location, study, trial) - assert expected == actual - - -def test_parse_trial_path(): - expected = { - "project": "whelk", - "location": "octopus", - "study": "oyster", - "trial": "nudibranch", - } - path = JobServiceClient.trial_path(**expected) - - # Check that the path construction is reversible. 
- actual = JobServiceClient.parse_trial_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "cuttlefish" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = JobServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "mussel", - } - path = JobServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "winkle" - expected = "folders/{folder}".format(folder=folder, ) - actual = JobServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "nautilus", - } - path = JobServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "scallop" - expected = "organizations/{organization}".format(organization=organization, ) - actual = JobServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "abalone", - } - path = JobServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. 
- actual = JobServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "squid" - expected = "projects/{project}".format(project=project, ) - actual = JobServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "clam", - } - path = JobServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = JobServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "whelk" - location = "octopus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = JobServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - } - path = JobServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = JobServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.JobServiceTransport, '_prep_wrapped_messages') as prep: - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.JobServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = JobServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = JobServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = JobServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py deleted file mode 100644 index be5080b0ec..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_metadata_service.py +++ /dev/null @@ -1,9706 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.metadata_service import MetadataServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.metadata_service import MetadataServiceClient -from google.cloud.aiplatform_v1beta1.services.metadata_service import pagers -from google.cloud.aiplatform_v1beta1.services.metadata_service import transports -from google.cloud.aiplatform_v1beta1.types import artifact -from google.cloud.aiplatform_v1beta1.types import artifact as gca_artifact -from google.cloud.aiplatform_v1beta1.types import context -from google.cloud.aiplatform_v1beta1.types import context as gca_context -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import event -from google.cloud.aiplatform_v1beta1.types import execution -from google.cloud.aiplatform_v1beta1.types import execution as gca_execution -from google.cloud.aiplatform_v1beta1.types import lineage_subgraph -from google.cloud.aiplatform_v1beta1.types import metadata_schema -from google.cloud.aiplatform_v1beta1.types import metadata_schema as gca_metadata_schema -from google.cloud.aiplatform_v1beta1.types import metadata_service -from google.cloud.aiplatform_v1beta1.types import metadata_store -from google.cloud.aiplatform_v1beta1.types import 
metadata_store as gca_metadata_store -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert MetadataServiceClient._get_default_mtls_endpoint(None) is None - assert MetadataServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert MetadataServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert MetadataServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert MetadataServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert MetadataServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - MetadataServiceClient, - MetadataServiceAsyncClient, -]) -def test_metadata_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') 
as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.MetadataServiceGrpcTransport, "grpc"), - (transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_metadata_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - MetadataServiceClient, - MetadataServiceAsyncClient, -]) -def test_metadata_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_metadata_service_client_get_transport_class(): - transport = 
MetadataServiceClient.get_transport_class() - available_transports = [ - transports.MetadataServiceGrpcTransport, - ] - assert transport in available_transports - - transport = MetadataServiceClient.get_transport_class("grpc") - assert transport == transports.MetadataServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc"), - (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(MetadataServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceClient)) -@mock.patch.object(MetadataServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceAsyncClient)) -def test_metadata_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(MetadataServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(MetadataServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. 
- options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc", "true"), - (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc", "false"), - (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(MetadataServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceClient)) -@mock.patch.object(MetadataServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MetadataServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_metadata_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. 
Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc"), - (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_metadata_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (MetadataServiceClient, transports.MetadataServiceGrpcTransport, "grpc"), - (MetadataServiceAsyncClient, transports.MetadataServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_metadata_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_metadata_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.metadata_service.transports.MetadataServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = MetadataServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_create_metadata_store(transport: str = 'grpc', request_type=metadata_service.CreateMetadataStoreRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateMetadataStoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_metadata_store_from_dict(): - test_create_metadata_store(request_type=dict) - - -def test_create_metadata_store_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_store), - '__call__') as call: - client.create_metadata_store() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateMetadataStoreRequest() - - -@pytest.mark.asyncio -async def test_create_metadata_store_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateMetadataStoreRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateMetadataStoreRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_metadata_store_async_from_dict(): - await test_create_metadata_store_async(request_type=dict) - - -def test_create_metadata_store_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.CreateMetadataStoreRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_store), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_metadata_store_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.CreateMetadataStoreRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_metadata_store), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_metadata_store_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_metadata_store( - parent='parent_value', - metadata_store=gca_metadata_store.MetadataStore(name='name_value'), - metadata_store_id='metadata_store_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].metadata_store - mock_val = gca_metadata_store.MetadataStore(name='name_value') - assert arg == mock_val - arg = args[0].metadata_store_id - mock_val = 'metadata_store_id_value' - assert arg == mock_val - - -def test_create_metadata_store_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_metadata_store( - metadata_service.CreateMetadataStoreRequest(), - parent='parent_value', - metadata_store=gca_metadata_store.MetadataStore(name='name_value'), - metadata_store_id='metadata_store_id_value', - ) - - -@pytest.mark.asyncio -async def test_create_metadata_store_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_metadata_store( - parent='parent_value', - metadata_store=gca_metadata_store.MetadataStore(name='name_value'), - metadata_store_id='metadata_store_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].metadata_store - mock_val = gca_metadata_store.MetadataStore(name='name_value') - assert arg == mock_val - arg = args[0].metadata_store_id - mock_val = 'metadata_store_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_metadata_store_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_metadata_store( - metadata_service.CreateMetadataStoreRequest(), - parent='parent_value', - metadata_store=gca_metadata_store.MetadataStore(name='name_value'), - metadata_store_id='metadata_store_id_value', - ) - - -def test_get_metadata_store(transport: str = 'grpc', request_type=metadata_service.GetMetadataStoreRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_store.MetadataStore( - name='name_value', - description='description_value', - ) - response = client.get_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetMetadataStoreRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, metadata_store.MetadataStore) - assert response.name == 'name_value' - assert response.description == 'description_value' - - -def test_get_metadata_store_from_dict(): - test_get_metadata_store(request_type=dict) - - -def test_get_metadata_store_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_store), - '__call__') as call: - client.get_metadata_store() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetMetadataStoreRequest() - - -@pytest.mark.asyncio -async def test_get_metadata_store_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetMetadataStoreRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_store.MetadataStore( - name='name_value', - description='description_value', - )) - response = await client.get_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetMetadataStoreRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, metadata_store.MetadataStore) - assert response.name == 'name_value' - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_get_metadata_store_async_from_dict(): - await test_get_metadata_store_async(request_type=dict) - - -def test_get_metadata_store_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.GetMetadataStoreRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_store), - '__call__') as call: - call.return_value = metadata_store.MetadataStore() - client.get_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_metadata_store_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.GetMetadataStoreRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_store), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_store.MetadataStore()) - await client.get_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_metadata_store_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_store.MetadataStore() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_metadata_store( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_metadata_store_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_metadata_store( - metadata_service.GetMetadataStoreRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_metadata_store_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = metadata_store.MetadataStore() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_store.MetadataStore()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_metadata_store( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_metadata_store_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_metadata_store( - metadata_service.GetMetadataStoreRequest(), - name='name_value', - ) - - -def test_list_metadata_stores(transport: str = 'grpc', request_type=metadata_service.ListMetadataStoresRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListMetadataStoresResponse( - next_page_token='next_page_token_value', - ) - response = client.list_metadata_stores(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListMetadataStoresRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListMetadataStoresPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_metadata_stores_from_dict(): - test_list_metadata_stores(request_type=dict) - - -def test_list_metadata_stores_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__') as call: - client.list_metadata_stores() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListMetadataStoresRequest() - - -@pytest.mark.asyncio -async def test_list_metadata_stores_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListMetadataStoresRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataStoresResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_metadata_stores(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListMetadataStoresRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListMetadataStoresAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_metadata_stores_async_from_dict(): - await test_list_metadata_stores_async(request_type=dict) - - -def test_list_metadata_stores_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.ListMetadataStoresRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__') as call: - call.return_value = metadata_service.ListMetadataStoresResponse() - client.list_metadata_stores(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_metadata_stores_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.ListMetadataStoresRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataStoresResponse()) - await client.list_metadata_stores(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_metadata_stores_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListMetadataStoresResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_metadata_stores( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_metadata_stores_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_metadata_stores( - metadata_service.ListMetadataStoresRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_metadata_stores_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListMetadataStoresResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataStoresResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_metadata_stores( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_metadata_stores_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_metadata_stores( - metadata_service.ListMetadataStoresRequest(), - parent='parent_value', - ) - - -def test_list_metadata_stores_pager(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - metadata_service.ListMetadataStoresResponse( - metadata_stores=[ - metadata_store.MetadataStore(), - metadata_store.MetadataStore(), - metadata_store.MetadataStore(), - ], - next_page_token='abc', - ), - metadata_service.ListMetadataStoresResponse( - metadata_stores=[], - next_page_token='def', - ), - metadata_service.ListMetadataStoresResponse( - metadata_stores=[ - metadata_store.MetadataStore(), - ], - next_page_token='ghi', - ), - metadata_service.ListMetadataStoresResponse( - metadata_stores=[ - metadata_store.MetadataStore(), - metadata_store.MetadataStore(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_metadata_stores(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, metadata_store.MetadataStore) - for i in results) - -def test_list_metadata_stores_pages(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - metadata_service.ListMetadataStoresResponse( - metadata_stores=[ - metadata_store.MetadataStore(), - metadata_store.MetadataStore(), - metadata_store.MetadataStore(), - ], - next_page_token='abc', - ), - metadata_service.ListMetadataStoresResponse( - metadata_stores=[], - next_page_token='def', - ), - metadata_service.ListMetadataStoresResponse( - metadata_stores=[ - metadata_store.MetadataStore(), - ], - next_page_token='ghi', - ), - metadata_service.ListMetadataStoresResponse( - metadata_stores=[ - metadata_store.MetadataStore(), - metadata_store.MetadataStore(), - ], - ), - RuntimeError, - ) - pages = list(client.list_metadata_stores(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_metadata_stores_async_pager(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - metadata_service.ListMetadataStoresResponse( - metadata_stores=[ - metadata_store.MetadataStore(), - metadata_store.MetadataStore(), - metadata_store.MetadataStore(), - ], - next_page_token='abc', - ), - metadata_service.ListMetadataStoresResponse( - metadata_stores=[], - next_page_token='def', - ), - metadata_service.ListMetadataStoresResponse( - metadata_stores=[ - metadata_store.MetadataStore(), - ], - next_page_token='ghi', - ), - metadata_service.ListMetadataStoresResponse( - metadata_stores=[ - metadata_store.MetadataStore(), - metadata_store.MetadataStore(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_metadata_stores(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, metadata_store.MetadataStore) - for i in responses) - -@pytest.mark.asyncio -async def test_list_metadata_stores_async_pages(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_stores), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - metadata_service.ListMetadataStoresResponse( - metadata_stores=[ - metadata_store.MetadataStore(), - metadata_store.MetadataStore(), - metadata_store.MetadataStore(), - ], - next_page_token='abc', - ), - metadata_service.ListMetadataStoresResponse( - metadata_stores=[], - next_page_token='def', - ), - metadata_service.ListMetadataStoresResponse( - metadata_stores=[ - metadata_store.MetadataStore(), - ], - next_page_token='ghi', - ), - metadata_service.ListMetadataStoresResponse( - metadata_stores=[ - metadata_store.MetadataStore(), - metadata_store.MetadataStore(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_metadata_stores(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_metadata_store(transport: str = 'grpc', request_type=metadata_service.DeleteMetadataStoreRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteMetadataStoreRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_delete_metadata_store_from_dict(): - test_delete_metadata_store(request_type=dict) - - -def test_delete_metadata_store_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_metadata_store), - '__call__') as call: - client.delete_metadata_store() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteMetadataStoreRequest() - - -@pytest.mark.asyncio -async def test_delete_metadata_store_async(transport: str = 'grpc_asyncio', request_type=metadata_service.DeleteMetadataStoreRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteMetadataStoreRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_metadata_store_async_from_dict(): - await test_delete_metadata_store_async(request_type=dict) - - -def test_delete_metadata_store_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.DeleteMetadataStoreRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_metadata_store), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_metadata_store_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.DeleteMetadataStoreRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_metadata_store), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_metadata_store(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_metadata_store_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_metadata_store( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_metadata_store_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_metadata_store( - metadata_service.DeleteMetadataStoreRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_metadata_store_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_metadata_store), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_metadata_store( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_metadata_store_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_metadata_store( - metadata_service.DeleteMetadataStoreRequest(), - name='name_value', - ) - - -def test_create_artifact(transport: str = 'grpc', request_type=metadata_service.CreateArtifactRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = gca_artifact.Artifact( - name='name_value', - display_name='display_name_value', - uri='uri_value', - etag='etag_value', - state=gca_artifact.Artifact.State.PENDING, - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - ) - response = client.create_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateArtifactRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_artifact.Artifact) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.uri == 'uri_value' - assert response.etag == 'etag_value' - assert response.state == gca_artifact.Artifact.State.PENDING - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -def test_create_artifact_from_dict(): - test_create_artifact(request_type=dict) - - -def test_create_artifact_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_artifact), - '__call__') as call: - client.create_artifact() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateArtifactRequest() - - -@pytest.mark.asyncio -async def test_create_artifact_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateArtifactRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact( - name='name_value', - display_name='display_name_value', - uri='uri_value', - etag='etag_value', - state=gca_artifact.Artifact.State.PENDING, - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - )) - response = await client.create_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateArtifactRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_artifact.Artifact) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.uri == 'uri_value' - assert response.etag == 'etag_value' - assert response.state == gca_artifact.Artifact.State.PENDING - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_create_artifact_async_from_dict(): - await test_create_artifact_async(request_type=dict) - - -def test_create_artifact_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.CreateArtifactRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_artifact), - '__call__') as call: - call.return_value = gca_artifact.Artifact() - client.create_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_artifact_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.CreateArtifactRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_artifact), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact()) - await client.create_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_artifact_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_artifact.Artifact() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_artifact( - parent='parent_value', - artifact=gca_artifact.Artifact(name='name_value'), - artifact_id='artifact_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].artifact - mock_val = gca_artifact.Artifact(name='name_value') - assert arg == mock_val - arg = args[0].artifact_id - mock_val = 'artifact_id_value' - assert arg == mock_val - - -def test_create_artifact_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_artifact( - metadata_service.CreateArtifactRequest(), - parent='parent_value', - artifact=gca_artifact.Artifact(name='name_value'), - artifact_id='artifact_id_value', - ) - - -@pytest.mark.asyncio -async def test_create_artifact_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_artifact.Artifact() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_artifact( - parent='parent_value', - artifact=gca_artifact.Artifact(name='name_value'), - artifact_id='artifact_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].artifact - mock_val = gca_artifact.Artifact(name='name_value') - assert arg == mock_val - arg = args[0].artifact_id - mock_val = 'artifact_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_artifact_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_artifact( - metadata_service.CreateArtifactRequest(), - parent='parent_value', - artifact=gca_artifact.Artifact(name='name_value'), - artifact_id='artifact_id_value', - ) - - -def test_get_artifact(transport: str = 'grpc', request_type=metadata_service.GetArtifactRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = artifact.Artifact( - name='name_value', - display_name='display_name_value', - uri='uri_value', - etag='etag_value', - state=artifact.Artifact.State.PENDING, - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - ) - response = client.get_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetArtifactRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, artifact.Artifact) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.uri == 'uri_value' - assert response.etag == 'etag_value' - assert response.state == artifact.Artifact.State.PENDING - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -def test_get_artifact_from_dict(): - test_get_artifact(request_type=dict) - - -def test_get_artifact_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_artifact), - '__call__') as call: - client.get_artifact() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetArtifactRequest() - - -@pytest.mark.asyncio -async def test_get_artifact_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetArtifactRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(artifact.Artifact( - name='name_value', - display_name='display_name_value', - uri='uri_value', - etag='etag_value', - state=artifact.Artifact.State.PENDING, - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - )) - response = await client.get_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetArtifactRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, artifact.Artifact) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.uri == 'uri_value' - assert response.etag == 'etag_value' - assert response.state == artifact.Artifact.State.PENDING - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_get_artifact_async_from_dict(): - await test_get_artifact_async(request_type=dict) - - -def test_get_artifact_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.GetArtifactRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_artifact), - '__call__') as call: - call.return_value = artifact.Artifact() - client.get_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_artifact_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.GetArtifactRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_artifact), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(artifact.Artifact()) - await client.get_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_artifact_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = artifact.Artifact() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_artifact( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_artifact_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_artifact( - metadata_service.GetArtifactRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_artifact_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = artifact.Artifact() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(artifact.Artifact()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_artifact( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_artifact_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_artifact( - metadata_service.GetArtifactRequest(), - name='name_value', - ) - - -def test_list_artifacts(transport: str = 'grpc', request_type=metadata_service.ListArtifactsRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_artifacts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListArtifactsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_artifacts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListArtifactsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListArtifactsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_artifacts_from_dict(): - test_list_artifacts(request_type=dict) - - -def test_list_artifacts_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_artifacts), - '__call__') as call: - client.list_artifacts() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListArtifactsRequest() - - -@pytest.mark.asyncio -async def test_list_artifacts_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListArtifactsRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_artifacts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListArtifactsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_artifacts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListArtifactsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListArtifactsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_artifacts_async_from_dict(): - await test_list_artifacts_async(request_type=dict) - - -def test_list_artifacts_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = metadata_service.ListArtifactsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_artifacts), - '__call__') as call: - call.return_value = metadata_service.ListArtifactsResponse() - client.list_artifacts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_artifacts_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.ListArtifactsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_artifacts), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListArtifactsResponse()) - await client.list_artifacts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_artifacts_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_artifacts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListArtifactsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_artifacts( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_artifacts_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_artifacts( - metadata_service.ListArtifactsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_artifacts_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_artifacts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListArtifactsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListArtifactsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_artifacts( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_artifacts_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_artifacts( - metadata_service.ListArtifactsRequest(), - parent='parent_value', - ) - - -def test_list_artifacts_pager(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_artifacts), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - metadata_service.ListArtifactsResponse( - artifacts=[ - artifact.Artifact(), - artifact.Artifact(), - artifact.Artifact(), - ], - next_page_token='abc', - ), - metadata_service.ListArtifactsResponse( - artifacts=[], - next_page_token='def', - ), - metadata_service.ListArtifactsResponse( - artifacts=[ - artifact.Artifact(), - ], - next_page_token='ghi', - ), - metadata_service.ListArtifactsResponse( - artifacts=[ - artifact.Artifact(), - artifact.Artifact(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_artifacts(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, artifact.Artifact) - for i in results) - -def test_list_artifacts_pages(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_artifacts), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - metadata_service.ListArtifactsResponse( - artifacts=[ - artifact.Artifact(), - artifact.Artifact(), - artifact.Artifact(), - ], - next_page_token='abc', - ), - metadata_service.ListArtifactsResponse( - artifacts=[], - next_page_token='def', - ), - metadata_service.ListArtifactsResponse( - artifacts=[ - artifact.Artifact(), - ], - next_page_token='ghi', - ), - metadata_service.ListArtifactsResponse( - artifacts=[ - artifact.Artifact(), - artifact.Artifact(), - ], - ), - RuntimeError, - ) - pages = list(client.list_artifacts(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_artifacts_async_pager(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_artifacts), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - metadata_service.ListArtifactsResponse( - artifacts=[ - artifact.Artifact(), - artifact.Artifact(), - artifact.Artifact(), - ], - next_page_token='abc', - ), - metadata_service.ListArtifactsResponse( - artifacts=[], - next_page_token='def', - ), - metadata_service.ListArtifactsResponse( - artifacts=[ - artifact.Artifact(), - ], - next_page_token='ghi', - ), - metadata_service.ListArtifactsResponse( - artifacts=[ - artifact.Artifact(), - artifact.Artifact(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_artifacts(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, artifact.Artifact) - for i in responses) - -@pytest.mark.asyncio -async def test_list_artifacts_async_pages(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_artifacts), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - metadata_service.ListArtifactsResponse( - artifacts=[ - artifact.Artifact(), - artifact.Artifact(), - artifact.Artifact(), - ], - next_page_token='abc', - ), - metadata_service.ListArtifactsResponse( - artifacts=[], - next_page_token='def', - ), - metadata_service.ListArtifactsResponse( - artifacts=[ - artifact.Artifact(), - ], - next_page_token='ghi', - ), - metadata_service.ListArtifactsResponse( - artifacts=[ - artifact.Artifact(), - artifact.Artifact(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_artifacts(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_artifact(transport: str = 'grpc', request_type=metadata_service.UpdateArtifactRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_artifact.Artifact( - name='name_value', - display_name='display_name_value', - uri='uri_value', - etag='etag_value', - state=gca_artifact.Artifact.State.PENDING, - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - ) - response = client.update_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.UpdateArtifactRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_artifact.Artifact) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.uri == 'uri_value' - assert response.etag == 'etag_value' - assert response.state == gca_artifact.Artifact.State.PENDING - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -def test_update_artifact_from_dict(): - test_update_artifact(request_type=dict) - - -def test_update_artifact_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_artifact), - '__call__') as call: - client.update_artifact() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.UpdateArtifactRequest() - - -@pytest.mark.asyncio -async def test_update_artifact_async(transport: str = 'grpc_asyncio', request_type=metadata_service.UpdateArtifactRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact( - name='name_value', - display_name='display_name_value', - uri='uri_value', - etag='etag_value', - state=gca_artifact.Artifact.State.PENDING, - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - )) - response = await client.update_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.UpdateArtifactRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_artifact.Artifact) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.uri == 'uri_value' - assert response.etag == 'etag_value' - assert response.state == gca_artifact.Artifact.State.PENDING - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_update_artifact_async_from_dict(): - await test_update_artifact_async(request_type=dict) - - -def test_update_artifact_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.UpdateArtifactRequest() - - request.artifact.name = 'artifact.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_artifact), - '__call__') as call: - call.return_value = gca_artifact.Artifact() - client.update_artifact(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'artifact.name=artifact.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_artifact_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.UpdateArtifactRequest() - - request.artifact.name = 'artifact.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_artifact), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact()) - await client.update_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'artifact.name=artifact.name/value', - ) in kw['metadata'] - - -def test_update_artifact_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_artifact.Artifact() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.update_artifact( - artifact=gca_artifact.Artifact(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].artifact - mock_val = gca_artifact.Artifact(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_artifact_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_artifact( - metadata_service.UpdateArtifactRequest(), - artifact=gca_artifact.Artifact(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_artifact_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_artifact.Artifact() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_artifact.Artifact()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_artifact( - artifact=gca_artifact.Artifact(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].artifact - mock_val = gca_artifact.Artifact(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_artifact_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_artifact( - metadata_service.UpdateArtifactRequest(), - artifact=gca_artifact.Artifact(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_artifact(transport: str = 'grpc', request_type=metadata_service.DeleteArtifactRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteArtifactRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_delete_artifact_from_dict(): - test_delete_artifact(request_type=dict) - - -def test_delete_artifact_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_artifact), - '__call__') as call: - client.delete_artifact() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteArtifactRequest() - - -@pytest.mark.asyncio -async def test_delete_artifact_async(transport: str = 'grpc_asyncio', request_type=metadata_service.DeleteArtifactRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteArtifactRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_artifact_async_from_dict(): - await test_delete_artifact_async(request_type=dict) - - -def test_delete_artifact_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.DeleteArtifactRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_artifact), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_artifact(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_artifact_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.DeleteArtifactRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_artifact), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_artifact(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_artifact_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_artifact( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_artifact_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_artifact( - metadata_service.DeleteArtifactRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_artifact_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_artifact), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_artifact( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_artifact_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_artifact( - metadata_service.DeleteArtifactRequest(), - name='name_value', - ) - - -def test_purge_artifacts(transport: str = 'grpc', request_type=metadata_service.PurgeArtifactsRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_artifacts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.purge_artifacts(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.PurgeArtifactsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_purge_artifacts_from_dict(): - test_purge_artifacts(request_type=dict) - - -def test_purge_artifacts_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_artifacts), - '__call__') as call: - client.purge_artifacts() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.PurgeArtifactsRequest() - - -@pytest.mark.asyncio -async def test_purge_artifacts_async(transport: str = 'grpc_asyncio', request_type=metadata_service.PurgeArtifactsRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_artifacts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.purge_artifacts(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.PurgeArtifactsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_purge_artifacts_async_from_dict(): - await test_purge_artifacts_async(request_type=dict) - - -def test_purge_artifacts_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.PurgeArtifactsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_artifacts), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.purge_artifacts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_purge_artifacts_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.PurgeArtifactsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.purge_artifacts), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.purge_artifacts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_purge_artifacts_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_artifacts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.purge_artifacts( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_purge_artifacts_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.purge_artifacts( - metadata_service.PurgeArtifactsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_purge_artifacts_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_artifacts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.purge_artifacts( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_purge_artifacts_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.purge_artifacts( - metadata_service.PurgeArtifactsRequest(), - parent='parent_value', - ) - - -def test_create_context(transport: str = 'grpc', request_type=metadata_service.CreateContextRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_context.Context( - name='name_value', - display_name='display_name_value', - etag='etag_value', - parent_contexts=['parent_contexts_value'], - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - ) - response = client.create_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateContextRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_context.Context) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.etag == 'etag_value' - assert response.parent_contexts == ['parent_contexts_value'] - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -def test_create_context_from_dict(): - test_create_context(request_type=dict) - - -def test_create_context_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_context), - '__call__') as call: - client.create_context() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateContextRequest() - - -@pytest.mark.asyncio -async def test_create_context_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateContextRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context( - name='name_value', - display_name='display_name_value', - etag='etag_value', - parent_contexts=['parent_contexts_value'], - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - )) - response = await client.create_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateContextRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_context.Context) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.etag == 'etag_value' - assert response.parent_contexts == ['parent_contexts_value'] - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_create_context_async_from_dict(): - await test_create_context_async(request_type=dict) - - -def test_create_context_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.CreateContextRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_context), - '__call__') as call: - call.return_value = gca_context.Context() - client.create_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_context_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.CreateContextRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_context), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context()) - await client.create_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_context_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_context.Context() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_context( - parent='parent_value', - context=gca_context.Context(name='name_value'), - context_id='context_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].context - mock_val = gca_context.Context(name='name_value') - assert arg == mock_val - arg = args[0].context_id - mock_val = 'context_id_value' - assert arg == mock_val - - -def test_create_context_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_context( - metadata_service.CreateContextRequest(), - parent='parent_value', - context=gca_context.Context(name='name_value'), - context_id='context_id_value', - ) - - -@pytest.mark.asyncio -async def test_create_context_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_context.Context() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_context( - parent='parent_value', - context=gca_context.Context(name='name_value'), - context_id='context_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].context - mock_val = gca_context.Context(name='name_value') - assert arg == mock_val - arg = args[0].context_id - mock_val = 'context_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_context_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_context( - metadata_service.CreateContextRequest(), - parent='parent_value', - context=gca_context.Context(name='name_value'), - context_id='context_id_value', - ) - - -def test_get_context(transport: str = 'grpc', request_type=metadata_service.GetContextRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = context.Context( - name='name_value', - display_name='display_name_value', - etag='etag_value', - parent_contexts=['parent_contexts_value'], - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - ) - response = client.get_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetContextRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, context.Context) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.etag == 'etag_value' - assert response.parent_contexts == ['parent_contexts_value'] - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -def test_get_context_from_dict(): - test_get_context(request_type=dict) - - -def test_get_context_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_context), - '__call__') as call: - client.get_context() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetContextRequest() - - -@pytest.mark.asyncio -async def test_get_context_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetContextRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_context), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(context.Context( - name='name_value', - display_name='display_name_value', - etag='etag_value', - parent_contexts=['parent_contexts_value'], - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - )) - response = await client.get_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetContextRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, context.Context) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.etag == 'etag_value' - assert response.parent_contexts == ['parent_contexts_value'] - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_get_context_async_from_dict(): - await test_get_context_async(request_type=dict) - - -def test_get_context_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.GetContextRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_context), - '__call__') as call: - call.return_value = context.Context() - client.get_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_context_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.GetContextRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_context), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(context.Context()) - await client.get_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_context_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = context.Context() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_context( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_context_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_context( - metadata_service.GetContextRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_context_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = context.Context() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(context.Context()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_context( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_context_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_context( - metadata_service.GetContextRequest(), - name='name_value', - ) - - -def test_list_contexts(transport: str = 'grpc', request_type=metadata_service.ListContextsRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_contexts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListContextsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_contexts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListContextsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListContextsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_contexts_from_dict(): - test_list_contexts(request_type=dict) - - -def test_list_contexts_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_contexts), - '__call__') as call: - client.list_contexts() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListContextsRequest() - - -@pytest.mark.asyncio -async def test_list_contexts_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListContextsRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_contexts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListContextsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_contexts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListContextsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListContextsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_contexts_async_from_dict(): - await test_list_contexts_async(request_type=dict) - - -def test_list_contexts_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.ListContextsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_contexts), - '__call__') as call: - call.return_value = metadata_service.ListContextsResponse() - client.list_contexts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_contexts_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.ListContextsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_contexts), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListContextsResponse()) - await client.list_contexts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_contexts_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_contexts), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = metadata_service.ListContextsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_contexts( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_contexts_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_contexts( - metadata_service.ListContextsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_contexts_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_contexts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListContextsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListContextsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_contexts( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_contexts_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_contexts( - metadata_service.ListContextsRequest(), - parent='parent_value', - ) - - -def test_list_contexts_pager(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_contexts), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - metadata_service.ListContextsResponse( - contexts=[ - context.Context(), - context.Context(), - context.Context(), - ], - next_page_token='abc', - ), - metadata_service.ListContextsResponse( - contexts=[], - next_page_token='def', - ), - metadata_service.ListContextsResponse( - contexts=[ - context.Context(), - ], - next_page_token='ghi', - ), - metadata_service.ListContextsResponse( - contexts=[ - context.Context(), - context.Context(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_contexts(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, context.Context) - for i in results) - -def test_list_contexts_pages(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_contexts), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - metadata_service.ListContextsResponse( - contexts=[ - context.Context(), - context.Context(), - context.Context(), - ], - next_page_token='abc', - ), - metadata_service.ListContextsResponse( - contexts=[], - next_page_token='def', - ), - metadata_service.ListContextsResponse( - contexts=[ - context.Context(), - ], - next_page_token='ghi', - ), - metadata_service.ListContextsResponse( - contexts=[ - context.Context(), - context.Context(), - ], - ), - RuntimeError, - ) - pages = list(client.list_contexts(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_contexts_async_pager(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_contexts), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - metadata_service.ListContextsResponse( - contexts=[ - context.Context(), - context.Context(), - context.Context(), - ], - next_page_token='abc', - ), - metadata_service.ListContextsResponse( - contexts=[], - next_page_token='def', - ), - metadata_service.ListContextsResponse( - contexts=[ - context.Context(), - ], - next_page_token='ghi', - ), - metadata_service.ListContextsResponse( - contexts=[ - context.Context(), - context.Context(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_contexts(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, context.Context) - for i in responses) - -@pytest.mark.asyncio -async def test_list_contexts_async_pages(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_contexts), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - metadata_service.ListContextsResponse( - contexts=[ - context.Context(), - context.Context(), - context.Context(), - ], - next_page_token='abc', - ), - metadata_service.ListContextsResponse( - contexts=[], - next_page_token='def', - ), - metadata_service.ListContextsResponse( - contexts=[ - context.Context(), - ], - next_page_token='ghi', - ), - metadata_service.ListContextsResponse( - contexts=[ - context.Context(), - context.Context(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_contexts(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_context(transport: str = 'grpc', request_type=metadata_service.UpdateContextRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_context.Context( - name='name_value', - display_name='display_name_value', - etag='etag_value', - parent_contexts=['parent_contexts_value'], - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - ) - response = client.update_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.UpdateContextRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_context.Context) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.etag == 'etag_value' - assert response.parent_contexts == ['parent_contexts_value'] - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -def test_update_context_from_dict(): - test_update_context(request_type=dict) - - -def test_update_context_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_context), - '__call__') as call: - client.update_context() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.UpdateContextRequest() - - -@pytest.mark.asyncio -async def test_update_context_async(transport: str = 'grpc_asyncio', request_type=metadata_service.UpdateContextRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_context), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context( - name='name_value', - display_name='display_name_value', - etag='etag_value', - parent_contexts=['parent_contexts_value'], - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - )) - response = await client.update_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.UpdateContextRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_context.Context) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.etag == 'etag_value' - assert response.parent_contexts == ['parent_contexts_value'] - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_update_context_async_from_dict(): - await test_update_context_async(request_type=dict) - - -def test_update_context_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.UpdateContextRequest() - - request.context.name = 'context.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_context), - '__call__') as call: - call.return_value = gca_context.Context() - client.update_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'context.name=context.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_context_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.UpdateContextRequest() - - request.context.name = 'context.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_context), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context()) - await client.update_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'context.name=context.name/value', - ) in kw['metadata'] - - -def test_update_context_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_context.Context() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_context( - context=gca_context.Context(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].context - mock_val = gca_context.Context(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_context_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_context( - metadata_service.UpdateContextRequest(), - context=gca_context.Context(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_context_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_context.Context() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_context.Context()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_context( - context=gca_context.Context(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].context - mock_val = gca_context.Context(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_context_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_context( - metadata_service.UpdateContextRequest(), - context=gca_context.Context(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_context(transport: str = 'grpc', request_type=metadata_service.DeleteContextRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteContextRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_delete_context_from_dict(): - test_delete_context(request_type=dict) - - -def test_delete_context_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_context), - '__call__') as call: - client.delete_context() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteContextRequest() - - -@pytest.mark.asyncio -async def test_delete_context_async(transport: str = 'grpc_asyncio', request_type=metadata_service.DeleteContextRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteContextRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_context_async_from_dict(): - await test_delete_context_async(request_type=dict) - - -def test_delete_context_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.DeleteContextRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_context), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_context_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.DeleteContextRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_context), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_context(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_context_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_context( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_context_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_context( - metadata_service.DeleteContextRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_context_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_context), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.delete_context( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_context_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_context( - metadata_service.DeleteContextRequest(), - name='name_value', - ) - - -def test_purge_contexts(transport: str = 'grpc', request_type=metadata_service.PurgeContextsRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_contexts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.purge_contexts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.PurgeContextsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_purge_contexts_from_dict(): - test_purge_contexts(request_type=dict) - - -def test_purge_contexts_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_contexts), - '__call__') as call: - client.purge_contexts() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.PurgeContextsRequest() - - -@pytest.mark.asyncio -async def test_purge_contexts_async(transport: str = 'grpc_asyncio', request_type=metadata_service.PurgeContextsRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_contexts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.purge_contexts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.PurgeContextsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_purge_contexts_async_from_dict(): - await test_purge_contexts_async(request_type=dict) - - -def test_purge_contexts_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = metadata_service.PurgeContextsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_contexts), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.purge_contexts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_purge_contexts_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.PurgeContextsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_contexts), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.purge_contexts(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_purge_contexts_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.purge_contexts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.purge_contexts( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_purge_contexts_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.purge_contexts( - metadata_service.PurgeContextsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_purge_contexts_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_contexts), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.purge_contexts( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_purge_contexts_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.purge_contexts( - metadata_service.PurgeContextsRequest(), - parent='parent_value', - ) - - -def test_add_context_artifacts_and_executions(transport: str = 'grpc', request_type=metadata_service.AddContextArtifactsAndExecutionsRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_context_artifacts_and_executions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse( - ) - response = client.add_context_artifacts_and_executions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, metadata_service.AddContextArtifactsAndExecutionsResponse) - - -def test_add_context_artifacts_and_executions_from_dict(): - test_add_context_artifacts_and_executions(request_type=dict) - - -def test_add_context_artifacts_and_executions_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_context_artifacts_and_executions), - '__call__') as call: - client.add_context_artifacts_and_executions() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest() - - -@pytest.mark.asyncio -async def test_add_context_artifacts_and_executions_async(transport: str = 'grpc_asyncio', request_type=metadata_service.AddContextArtifactsAndExecutionsRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_context_artifacts_and_executions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextArtifactsAndExecutionsResponse( - )) - response = await client.add_context_artifacts_and_executions(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.AddContextArtifactsAndExecutionsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, metadata_service.AddContextArtifactsAndExecutionsResponse) - - -@pytest.mark.asyncio -async def test_add_context_artifacts_and_executions_async_from_dict(): - await test_add_context_artifacts_and_executions_async(request_type=dict) - - -def test_add_context_artifacts_and_executions_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.AddContextArtifactsAndExecutionsRequest() - - request.context = 'context/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_context_artifacts_and_executions), - '__call__') as call: - call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse() - client.add_context_artifacts_and_executions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'context=context/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_add_context_artifacts_and_executions_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.AddContextArtifactsAndExecutionsRequest() - - request.context = 'context/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.add_context_artifacts_and_executions), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextArtifactsAndExecutionsResponse()) - await client.add_context_artifacts_and_executions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'context=context/value', - ) in kw['metadata'] - - -def test_add_context_artifacts_and_executions_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_context_artifacts_and_executions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.add_context_artifacts_and_executions( - context='context_value', - artifacts=['artifacts_value'], - executions=['executions_value'], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].context - mock_val = 'context_value' - assert arg == mock_val - arg = args[0].artifacts - mock_val = ['artifacts_value'] - assert arg == mock_val - arg = args[0].executions - mock_val = ['executions_value'] - assert arg == mock_val - - -def test_add_context_artifacts_and_executions_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.add_context_artifacts_and_executions( - metadata_service.AddContextArtifactsAndExecutionsRequest(), - context='context_value', - artifacts=['artifacts_value'], - executions=['executions_value'], - ) - - -@pytest.mark.asyncio -async def test_add_context_artifacts_and_executions_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_context_artifacts_and_executions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.AddContextArtifactsAndExecutionsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextArtifactsAndExecutionsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.add_context_artifacts_and_executions( - context='context_value', - artifacts=['artifacts_value'], - executions=['executions_value'], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].context - mock_val = 'context_value' - assert arg == mock_val - arg = args[0].artifacts - mock_val = ['artifacts_value'] - assert arg == mock_val - arg = args[0].executions - mock_val = ['executions_value'] - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_add_context_artifacts_and_executions_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.add_context_artifacts_and_executions( - metadata_service.AddContextArtifactsAndExecutionsRequest(), - context='context_value', - artifacts=['artifacts_value'], - executions=['executions_value'], - ) - - -def test_add_context_children(transport: str = 'grpc', request_type=metadata_service.AddContextChildrenRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_context_children), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.AddContextChildrenResponse( - ) - response = client.add_context_children(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.AddContextChildrenRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, metadata_service.AddContextChildrenResponse) - - -def test_add_context_children_from_dict(): - test_add_context_children(request_type=dict) - - -def test_add_context_children_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_context_children), - '__call__') as call: - client.add_context_children() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.AddContextChildrenRequest() - - -@pytest.mark.asyncio -async def test_add_context_children_async(transport: str = 'grpc_asyncio', request_type=metadata_service.AddContextChildrenRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_context_children), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextChildrenResponse( - )) - response = await client.add_context_children(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.AddContextChildrenRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, metadata_service.AddContextChildrenResponse) - - -@pytest.mark.asyncio -async def test_add_context_children_async_from_dict(): - await test_add_context_children_async(request_type=dict) - - -def test_add_context_children_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.AddContextChildrenRequest() - - request.context = 'context/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_context_children), - '__call__') as call: - call.return_value = metadata_service.AddContextChildrenResponse() - client.add_context_children(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'context=context/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_add_context_children_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.AddContextChildrenRequest() - - request.context = 'context/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_context_children), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextChildrenResponse()) - await client.add_context_children(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'context=context/value', - ) in kw['metadata'] - - -def test_add_context_children_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_context_children), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.AddContextChildrenResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.add_context_children( - context='context_value', - child_contexts=['child_contexts_value'], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].context - mock_val = 'context_value' - assert arg == mock_val - arg = args[0].child_contexts - mock_val = ['child_contexts_value'] - assert arg == mock_val - - -def test_add_context_children_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.add_context_children( - metadata_service.AddContextChildrenRequest(), - context='context_value', - child_contexts=['child_contexts_value'], - ) - - -@pytest.mark.asyncio -async def test_add_context_children_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.add_context_children), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.AddContextChildrenResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddContextChildrenResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.add_context_children( - context='context_value', - child_contexts=['child_contexts_value'], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].context - mock_val = 'context_value' - assert arg == mock_val - arg = args[0].child_contexts - mock_val = ['child_contexts_value'] - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_add_context_children_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.add_context_children( - metadata_service.AddContextChildrenRequest(), - context='context_value', - child_contexts=['child_contexts_value'], - ) - - -def test_query_context_lineage_subgraph(transport: str = 'grpc', request_type=metadata_service.QueryContextLineageSubgraphRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.query_context_lineage_subgraph), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = lineage_subgraph.LineageSubgraph( - ) - response = client.query_context_lineage_subgraph(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.QueryContextLineageSubgraphRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, lineage_subgraph.LineageSubgraph) - - -def test_query_context_lineage_subgraph_from_dict(): - test_query_context_lineage_subgraph(request_type=dict) - - -def test_query_context_lineage_subgraph_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_context_lineage_subgraph), - '__call__') as call: - client.query_context_lineage_subgraph() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.QueryContextLineageSubgraphRequest() - - -@pytest.mark.asyncio -async def test_query_context_lineage_subgraph_async(transport: str = 'grpc_asyncio', request_type=metadata_service.QueryContextLineageSubgraphRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.query_context_lineage_subgraph), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph( - )) - response = await client.query_context_lineage_subgraph(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.QueryContextLineageSubgraphRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, lineage_subgraph.LineageSubgraph) - - -@pytest.mark.asyncio -async def test_query_context_lineage_subgraph_async_from_dict(): - await test_query_context_lineage_subgraph_async(request_type=dict) - - -def test_query_context_lineage_subgraph_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.QueryContextLineageSubgraphRequest() - - request.context = 'context/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_context_lineage_subgraph), - '__call__') as call: - call.return_value = lineage_subgraph.LineageSubgraph() - client.query_context_lineage_subgraph(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'context=context/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_query_context_lineage_subgraph_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.QueryContextLineageSubgraphRequest() - - request.context = 'context/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_context_lineage_subgraph), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph()) - await client.query_context_lineage_subgraph(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'context=context/value', - ) in kw['metadata'] - - -def test_query_context_lineage_subgraph_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_context_lineage_subgraph), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = lineage_subgraph.LineageSubgraph() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.query_context_lineage_subgraph( - context='context_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].context - mock_val = 'context_value' - assert arg == mock_val - - -def test_query_context_lineage_subgraph_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.query_context_lineage_subgraph( - metadata_service.QueryContextLineageSubgraphRequest(), - context='context_value', - ) - - -@pytest.mark.asyncio -async def test_query_context_lineage_subgraph_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_context_lineage_subgraph), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = lineage_subgraph.LineageSubgraph() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.query_context_lineage_subgraph( - context='context_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].context - mock_val = 'context_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_query_context_lineage_subgraph_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.query_context_lineage_subgraph( - metadata_service.QueryContextLineageSubgraphRequest(), - context='context_value', - ) - - -def test_create_execution(transport: str = 'grpc', request_type=metadata_service.CreateExecutionRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_execution), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_execution.Execution( - name='name_value', - display_name='display_name_value', - state=gca_execution.Execution.State.NEW, - etag='etag_value', - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - ) - response = client.create_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateExecutionRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_execution.Execution) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == gca_execution.Execution.State.NEW - assert response.etag == 'etag_value' - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -def test_create_execution_from_dict(): - test_create_execution(request_type=dict) - - -def test_create_execution_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_execution), - '__call__') as call: - client.create_execution() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateExecutionRequest() - - -@pytest.mark.asyncio -async def test_create_execution_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateExecutionRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_execution), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution( - name='name_value', - display_name='display_name_value', - state=gca_execution.Execution.State.NEW, - etag='etag_value', - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - )) - response = await client.create_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateExecutionRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_execution.Execution) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == gca_execution.Execution.State.NEW - assert response.etag == 'etag_value' - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_create_execution_async_from_dict(): - await test_create_execution_async(request_type=dict) - - -def test_create_execution_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.CreateExecutionRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_execution), - '__call__') as call: - call.return_value = gca_execution.Execution() - client.create_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_execution_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.CreateExecutionRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_execution), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution()) - await client.create_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_execution_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_execution), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_execution.Execution() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_execution( - parent='parent_value', - execution=gca_execution.Execution(name='name_value'), - execution_id='execution_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].execution - mock_val = gca_execution.Execution(name='name_value') - assert arg == mock_val - arg = args[0].execution_id - mock_val = 'execution_id_value' - assert arg == mock_val - - -def test_create_execution_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_execution( - metadata_service.CreateExecutionRequest(), - parent='parent_value', - execution=gca_execution.Execution(name='name_value'), - execution_id='execution_id_value', - ) - - -@pytest.mark.asyncio -async def test_create_execution_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_execution), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_execution.Execution() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_execution( - parent='parent_value', - execution=gca_execution.Execution(name='name_value'), - execution_id='execution_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].execution - mock_val = gca_execution.Execution(name='name_value') - assert arg == mock_val - arg = args[0].execution_id - mock_val = 'execution_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_execution_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_execution( - metadata_service.CreateExecutionRequest(), - parent='parent_value', - execution=gca_execution.Execution(name='name_value'), - execution_id='execution_id_value', - ) - - -def test_get_execution(transport: str = 'grpc', request_type=metadata_service.GetExecutionRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_execution), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = execution.Execution( - name='name_value', - display_name='display_name_value', - state=execution.Execution.State.NEW, - etag='etag_value', - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - ) - response = client.get_execution(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetExecutionRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, execution.Execution) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == execution.Execution.State.NEW - assert response.etag == 'etag_value' - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -def test_get_execution_from_dict(): - test_get_execution(request_type=dict) - - -def test_get_execution_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_execution), - '__call__') as call: - client.get_execution() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetExecutionRequest() - - -@pytest.mark.asyncio -async def test_get_execution_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetExecutionRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_execution), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(execution.Execution( - name='name_value', - display_name='display_name_value', - state=execution.Execution.State.NEW, - etag='etag_value', - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - )) - response = await client.get_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetExecutionRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, execution.Execution) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == execution.Execution.State.NEW - assert response.etag == 'etag_value' - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_get_execution_async_from_dict(): - await test_get_execution_async(request_type=dict) - - -def test_get_execution_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.GetExecutionRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_execution), - '__call__') as call: - call.return_value = execution.Execution() - client.get_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_execution_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.GetExecutionRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_execution), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(execution.Execution()) - await client.get_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_execution_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_execution), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = execution.Execution() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_execution( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_execution_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_execution( - metadata_service.GetExecutionRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_execution_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_execution), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = execution.Execution() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(execution.Execution()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_execution( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_execution_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_execution( - metadata_service.GetExecutionRequest(), - name='name_value', - ) - - -def test_list_executions(transport: str = 'grpc', request_type=metadata_service.ListExecutionsRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_executions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListExecutionsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_executions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListExecutionsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListExecutionsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_executions_from_dict(): - test_list_executions(request_type=dict) - - -def test_list_executions_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_executions), - '__call__') as call: - client.list_executions() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListExecutionsRequest() - - -@pytest.mark.asyncio -async def test_list_executions_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListExecutionsRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_executions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListExecutionsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_executions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListExecutionsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListExecutionsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_executions_async_from_dict(): - await test_list_executions_async(request_type=dict) - - -def test_list_executions_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = metadata_service.ListExecutionsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_executions), - '__call__') as call: - call.return_value = metadata_service.ListExecutionsResponse() - client.list_executions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_executions_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.ListExecutionsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_executions), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListExecutionsResponse()) - await client.list_executions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_executions_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_executions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListExecutionsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_executions( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_executions_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_executions( - metadata_service.ListExecutionsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_executions_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_executions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListExecutionsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListExecutionsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_executions( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_executions_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_executions( - metadata_service.ListExecutionsRequest(), - parent='parent_value', - ) - - -def test_list_executions_pager(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_executions), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - metadata_service.ListExecutionsResponse( - executions=[ - execution.Execution(), - execution.Execution(), - execution.Execution(), - ], - next_page_token='abc', - ), - metadata_service.ListExecutionsResponse( - executions=[], - next_page_token='def', - ), - metadata_service.ListExecutionsResponse( - executions=[ - execution.Execution(), - ], - next_page_token='ghi', - ), - metadata_service.ListExecutionsResponse( - executions=[ - execution.Execution(), - execution.Execution(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_executions(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, execution.Execution) - for i in results) - -def test_list_executions_pages(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_executions), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - metadata_service.ListExecutionsResponse( - executions=[ - execution.Execution(), - execution.Execution(), - execution.Execution(), - ], - next_page_token='abc', - ), - metadata_service.ListExecutionsResponse( - executions=[], - next_page_token='def', - ), - metadata_service.ListExecutionsResponse( - executions=[ - execution.Execution(), - ], - next_page_token='ghi', - ), - metadata_service.ListExecutionsResponse( - executions=[ - execution.Execution(), - execution.Execution(), - ], - ), - RuntimeError, - ) - pages = list(client.list_executions(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_executions_async_pager(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_executions), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - metadata_service.ListExecutionsResponse( - executions=[ - execution.Execution(), - execution.Execution(), - execution.Execution(), - ], - next_page_token='abc', - ), - metadata_service.ListExecutionsResponse( - executions=[], - next_page_token='def', - ), - metadata_service.ListExecutionsResponse( - executions=[ - execution.Execution(), - ], - next_page_token='ghi', - ), - metadata_service.ListExecutionsResponse( - executions=[ - execution.Execution(), - execution.Execution(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_executions(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, execution.Execution) - for i in responses) - -@pytest.mark.asyncio -async def test_list_executions_async_pages(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_executions), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - metadata_service.ListExecutionsResponse( - executions=[ - execution.Execution(), - execution.Execution(), - execution.Execution(), - ], - next_page_token='abc', - ), - metadata_service.ListExecutionsResponse( - executions=[], - next_page_token='def', - ), - metadata_service.ListExecutionsResponse( - executions=[ - execution.Execution(), - ], - next_page_token='ghi', - ), - metadata_service.ListExecutionsResponse( - executions=[ - execution.Execution(), - execution.Execution(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_executions(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_execution(transport: str = 'grpc', request_type=metadata_service.UpdateExecutionRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_execution), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_execution.Execution( - name='name_value', - display_name='display_name_value', - state=gca_execution.Execution.State.NEW, - etag='etag_value', - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - ) - response = client.update_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.UpdateExecutionRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_execution.Execution) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == gca_execution.Execution.State.NEW - assert response.etag == 'etag_value' - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -def test_update_execution_from_dict(): - test_update_execution(request_type=dict) - - -def test_update_execution_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_execution), - '__call__') as call: - client.update_execution() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.UpdateExecutionRequest() - - -@pytest.mark.asyncio -async def test_update_execution_async(transport: str = 'grpc_asyncio', request_type=metadata_service.UpdateExecutionRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_execution), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution( - name='name_value', - display_name='display_name_value', - state=gca_execution.Execution.State.NEW, - etag='etag_value', - schema_title='schema_title_value', - schema_version='schema_version_value', - description='description_value', - )) - response = await client.update_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.UpdateExecutionRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_execution.Execution) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == gca_execution.Execution.State.NEW - assert response.etag == 'etag_value' - assert response.schema_title == 'schema_title_value' - assert response.schema_version == 'schema_version_value' - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_update_execution_async_from_dict(): - await test_update_execution_async(request_type=dict) - - -def test_update_execution_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.UpdateExecutionRequest() - - request.execution.name = 'execution.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_execution), - '__call__') as call: - call.return_value = gca_execution.Execution() - client.update_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'execution.name=execution.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_execution_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.UpdateExecutionRequest() - - request.execution.name = 'execution.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_execution), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution()) - await client.update_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'execution.name=execution.name/value', - ) in kw['metadata'] - - -def test_update_execution_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_execution), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_execution.Execution() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_execution( - execution=gca_execution.Execution(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].execution - mock_val = gca_execution.Execution(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_execution_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_execution( - metadata_service.UpdateExecutionRequest(), - execution=gca_execution.Execution(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_execution_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_execution), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_execution.Execution() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_execution.Execution()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_execution( - execution=gca_execution.Execution(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].execution - mock_val = gca_execution.Execution(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_execution_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_execution( - metadata_service.UpdateExecutionRequest(), - execution=gca_execution.Execution(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_execution(transport: str = 'grpc', request_type=metadata_service.DeleteExecutionRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_execution), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteExecutionRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_delete_execution_from_dict(): - test_delete_execution(request_type=dict) - - -def test_delete_execution_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_execution), - '__call__') as call: - client.delete_execution() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteExecutionRequest() - - -@pytest.mark.asyncio -async def test_delete_execution_async(transport: str = 'grpc_asyncio', request_type=metadata_service.DeleteExecutionRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_execution), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.DeleteExecutionRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_execution_async_from_dict(): - await test_delete_execution_async(request_type=dict) - - -def test_delete_execution_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.DeleteExecutionRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_execution), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_execution(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_execution_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.DeleteExecutionRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_execution), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_execution(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_execution_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_execution), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_execution( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_execution_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_execution( - metadata_service.DeleteExecutionRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_execution_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_execution), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_execution( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_execution_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_execution( - metadata_service.DeleteExecutionRequest(), - name='name_value', - ) - - -def test_purge_executions(transport: str = 'grpc', request_type=metadata_service.PurgeExecutionsRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_executions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.purge_executions(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.PurgeExecutionsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_purge_executions_from_dict(): - test_purge_executions(request_type=dict) - - -def test_purge_executions_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_executions), - '__call__') as call: - client.purge_executions() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.PurgeExecutionsRequest() - - -@pytest.mark.asyncio -async def test_purge_executions_async(transport: str = 'grpc_asyncio', request_type=metadata_service.PurgeExecutionsRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_executions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.purge_executions(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.PurgeExecutionsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_purge_executions_async_from_dict(): - await test_purge_executions_async(request_type=dict) - - -def test_purge_executions_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.PurgeExecutionsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_executions), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.purge_executions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_purge_executions_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.PurgeExecutionsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.purge_executions), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.purge_executions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_purge_executions_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_executions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.purge_executions( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_purge_executions_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.purge_executions( - metadata_service.PurgeExecutionsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_purge_executions_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.purge_executions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.purge_executions( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_purge_executions_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.purge_executions( - metadata_service.PurgeExecutionsRequest(), - parent='parent_value', - ) - - -def test_add_execution_events(transport: str = 'grpc', request_type=metadata_service.AddExecutionEventsRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_execution_events), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.AddExecutionEventsResponse( - ) - response = client.add_execution_events(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.AddExecutionEventsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, metadata_service.AddExecutionEventsResponse) - - -def test_add_execution_events_from_dict(): - test_add_execution_events(request_type=dict) - - -def test_add_execution_events_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_execution_events), - '__call__') as call: - client.add_execution_events() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.AddExecutionEventsRequest() - - -@pytest.mark.asyncio -async def test_add_execution_events_async(transport: str = 'grpc_asyncio', request_type=metadata_service.AddExecutionEventsRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.add_execution_events), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddExecutionEventsResponse( - )) - response = await client.add_execution_events(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.AddExecutionEventsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, metadata_service.AddExecutionEventsResponse) - - -@pytest.mark.asyncio -async def test_add_execution_events_async_from_dict(): - await test_add_execution_events_async(request_type=dict) - - -def test_add_execution_events_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.AddExecutionEventsRequest() - - request.execution = 'execution/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_execution_events), - '__call__') as call: - call.return_value = metadata_service.AddExecutionEventsResponse() - client.add_execution_events(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'execution=execution/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_add_execution_events_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.AddExecutionEventsRequest() - - request.execution = 'execution/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_execution_events), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddExecutionEventsResponse()) - await client.add_execution_events(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'execution=execution/value', - ) in kw['metadata'] - - -def test_add_execution_events_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_execution_events), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.AddExecutionEventsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.add_execution_events( - execution='execution_value', - events=[event.Event(artifact='artifact_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].execution - mock_val = 'execution_value' - assert arg == mock_val - arg = args[0].events - mock_val = [event.Event(artifact='artifact_value')] - assert arg == mock_val - - -def test_add_execution_events_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.add_execution_events( - metadata_service.AddExecutionEventsRequest(), - execution='execution_value', - events=[event.Event(artifact='artifact_value')], - ) - - -@pytest.mark.asyncio -async def test_add_execution_events_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_execution_events), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.AddExecutionEventsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.AddExecutionEventsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.add_execution_events( - execution='execution_value', - events=[event.Event(artifact='artifact_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].execution - mock_val = 'execution_value' - assert arg == mock_val - arg = args[0].events - mock_val = [event.Event(artifact='artifact_value')] - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_add_execution_events_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.add_execution_events( - metadata_service.AddExecutionEventsRequest(), - execution='execution_value', - events=[event.Event(artifact='artifact_value')], - ) - - -def test_query_execution_inputs_and_outputs(transport: str = 'grpc', request_type=metadata_service.QueryExecutionInputsAndOutputsRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_execution_inputs_and_outputs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = lineage_subgraph.LineageSubgraph( - ) - response = client.query_execution_inputs_and_outputs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, lineage_subgraph.LineageSubgraph) - - -def test_query_execution_inputs_and_outputs_from_dict(): - test_query_execution_inputs_and_outputs(request_type=dict) - - -def test_query_execution_inputs_and_outputs_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_execution_inputs_and_outputs), - '__call__') as call: - client.query_execution_inputs_and_outputs() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest() - - -@pytest.mark.asyncio -async def test_query_execution_inputs_and_outputs_async(transport: str = 'grpc_asyncio', request_type=metadata_service.QueryExecutionInputsAndOutputsRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_execution_inputs_and_outputs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph( - )) - response = await client.query_execution_inputs_and_outputs(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.QueryExecutionInputsAndOutputsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, lineage_subgraph.LineageSubgraph) - - -@pytest.mark.asyncio -async def test_query_execution_inputs_and_outputs_async_from_dict(): - await test_query_execution_inputs_and_outputs_async(request_type=dict) - - -def test_query_execution_inputs_and_outputs_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.QueryExecutionInputsAndOutputsRequest() - - request.execution = 'execution/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_execution_inputs_and_outputs), - '__call__') as call: - call.return_value = lineage_subgraph.LineageSubgraph() - client.query_execution_inputs_and_outputs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'execution=execution/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_query_execution_inputs_and_outputs_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.QueryExecutionInputsAndOutputsRequest() - - request.execution = 'execution/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.query_execution_inputs_and_outputs), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph()) - await client.query_execution_inputs_and_outputs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'execution=execution/value', - ) in kw['metadata'] - - -def test_query_execution_inputs_and_outputs_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_execution_inputs_and_outputs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = lineage_subgraph.LineageSubgraph() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.query_execution_inputs_and_outputs( - execution='execution_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].execution - mock_val = 'execution_value' - assert arg == mock_val - - -def test_query_execution_inputs_and_outputs_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.query_execution_inputs_and_outputs( - metadata_service.QueryExecutionInputsAndOutputsRequest(), - execution='execution_value', - ) - - -@pytest.mark.asyncio -async def test_query_execution_inputs_and_outputs_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_execution_inputs_and_outputs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = lineage_subgraph.LineageSubgraph() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.query_execution_inputs_and_outputs( - execution='execution_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].execution - mock_val = 'execution_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_query_execution_inputs_and_outputs_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.query_execution_inputs_and_outputs( - metadata_service.QueryExecutionInputsAndOutputsRequest(), - execution='execution_value', - ) - - -def test_create_metadata_schema(transport: str = 'grpc', request_type=metadata_service.CreateMetadataSchemaRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_schema), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_metadata_schema.MetadataSchema( - name='name_value', - schema_version='schema_version_value', - schema='schema_value', - schema_type=gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE, - description='description_value', - ) - response = client.create_metadata_schema(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateMetadataSchemaRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_metadata_schema.MetadataSchema) - assert response.name == 'name_value' - assert response.schema_version == 'schema_version_value' - assert response.schema == 'schema_value' - assert response.schema_type == gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE - assert response.description == 'description_value' - - -def test_create_metadata_schema_from_dict(): - test_create_metadata_schema(request_type=dict) - - -def test_create_metadata_schema_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_schema), - '__call__') as call: - client.create_metadata_schema() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateMetadataSchemaRequest() - - -@pytest.mark.asyncio -async def test_create_metadata_schema_async(transport: str = 'grpc_asyncio', request_type=metadata_service.CreateMetadataSchemaRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_schema), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_metadata_schema.MetadataSchema( - name='name_value', - schema_version='schema_version_value', - schema='schema_value', - schema_type=gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE, - description='description_value', - )) - response = await client.create_metadata_schema(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.CreateMetadataSchemaRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_metadata_schema.MetadataSchema) - assert response.name == 'name_value' - assert response.schema_version == 'schema_version_value' - assert response.schema == 'schema_value' - assert response.schema_type == gca_metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_create_metadata_schema_async_from_dict(): - await test_create_metadata_schema_async(request_type=dict) - - -def test_create_metadata_schema_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.CreateMetadataSchemaRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_schema), - '__call__') as call: - call.return_value = gca_metadata_schema.MetadataSchema() - client.create_metadata_schema(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_metadata_schema_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.CreateMetadataSchemaRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_metadata_schema), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_metadata_schema.MetadataSchema()) - await client.create_metadata_schema(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_metadata_schema_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_schema), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_metadata_schema.MetadataSchema() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_metadata_schema( - parent='parent_value', - metadata_schema=gca_metadata_schema.MetadataSchema(name='name_value'), - metadata_schema_id='metadata_schema_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].metadata_schema - mock_val = gca_metadata_schema.MetadataSchema(name='name_value') - assert arg == mock_val - arg = args[0].metadata_schema_id - mock_val = 'metadata_schema_id_value' - assert arg == mock_val - - -def test_create_metadata_schema_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_metadata_schema( - metadata_service.CreateMetadataSchemaRequest(), - parent='parent_value', - metadata_schema=gca_metadata_schema.MetadataSchema(name='name_value'), - metadata_schema_id='metadata_schema_id_value', - ) - - -@pytest.mark.asyncio -async def test_create_metadata_schema_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_metadata_schema), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_metadata_schema.MetadataSchema() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_metadata_schema.MetadataSchema()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_metadata_schema( - parent='parent_value', - metadata_schema=gca_metadata_schema.MetadataSchema(name='name_value'), - metadata_schema_id='metadata_schema_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].metadata_schema - mock_val = gca_metadata_schema.MetadataSchema(name='name_value') - assert arg == mock_val - arg = args[0].metadata_schema_id - mock_val = 'metadata_schema_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_metadata_schema_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_metadata_schema( - metadata_service.CreateMetadataSchemaRequest(), - parent='parent_value', - metadata_schema=gca_metadata_schema.MetadataSchema(name='name_value'), - metadata_schema_id='metadata_schema_id_value', - ) - - -def test_get_metadata_schema(transport: str = 'grpc', request_type=metadata_service.GetMetadataSchemaRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_schema), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_schema.MetadataSchema( - name='name_value', - schema_version='schema_version_value', - schema='schema_value', - schema_type=metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE, - description='description_value', - ) - response = client.get_metadata_schema(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetMetadataSchemaRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, metadata_schema.MetadataSchema) - assert response.name == 'name_value' - assert response.schema_version == 'schema_version_value' - assert response.schema == 'schema_value' - assert response.schema_type == metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE - assert response.description == 'description_value' - - -def test_get_metadata_schema_from_dict(): - test_get_metadata_schema(request_type=dict) - - -def test_get_metadata_schema_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_schema), - '__call__') as call: - client.get_metadata_schema() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetMetadataSchemaRequest() - - -@pytest.mark.asyncio -async def test_get_metadata_schema_async(transport: str = 'grpc_asyncio', request_type=metadata_service.GetMetadataSchemaRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_schema), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_schema.MetadataSchema( - name='name_value', - schema_version='schema_version_value', - schema='schema_value', - schema_type=metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE, - description='description_value', - )) - response = await client.get_metadata_schema(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.GetMetadataSchemaRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, metadata_schema.MetadataSchema) - assert response.name == 'name_value' - assert response.schema_version == 'schema_version_value' - assert response.schema == 'schema_value' - assert response.schema_type == metadata_schema.MetadataSchema.MetadataSchemaType.ARTIFACT_TYPE - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_get_metadata_schema_async_from_dict(): - await test_get_metadata_schema_async(request_type=dict) - - -def test_get_metadata_schema_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.GetMetadataSchemaRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_schema), - '__call__') as call: - call.return_value = metadata_schema.MetadataSchema() - client.get_metadata_schema(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_metadata_schema_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.GetMetadataSchemaRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_schema), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_schema.MetadataSchema()) - await client.get_metadata_schema(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_metadata_schema_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_schema), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_schema.MetadataSchema() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_metadata_schema( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_metadata_schema_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_metadata_schema( - metadata_service.GetMetadataSchemaRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_metadata_schema_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_metadata_schema), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_schema.MetadataSchema() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_schema.MetadataSchema()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_metadata_schema( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_metadata_schema_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_metadata_schema( - metadata_service.GetMetadataSchemaRequest(), - name='name_value', - ) - - -def test_list_metadata_schemas(transport: str = 'grpc', request_type=metadata_service.ListMetadataSchemasRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_schemas), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListMetadataSchemasResponse( - next_page_token='next_page_token_value', - ) - response = client.list_metadata_schemas(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListMetadataSchemasRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListMetadataSchemasPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_metadata_schemas_from_dict(): - test_list_metadata_schemas(request_type=dict) - - -def test_list_metadata_schemas_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_metadata_schemas), - '__call__') as call: - client.list_metadata_schemas() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListMetadataSchemasRequest() - - -@pytest.mark.asyncio -async def test_list_metadata_schemas_async(transport: str = 'grpc_asyncio', request_type=metadata_service.ListMetadataSchemasRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_schemas), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataSchemasResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_metadata_schemas(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.ListMetadataSchemasRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListMetadataSchemasAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_metadata_schemas_async_from_dict(): - await test_list_metadata_schemas_async(request_type=dict) - - -def test_list_metadata_schemas_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = metadata_service.ListMetadataSchemasRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_schemas), - '__call__') as call: - call.return_value = metadata_service.ListMetadataSchemasResponse() - client.list_metadata_schemas(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_metadata_schemas_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.ListMetadataSchemasRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_schemas), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataSchemasResponse()) - await client.list_metadata_schemas(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_metadata_schemas_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_metadata_schemas), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListMetadataSchemasResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_metadata_schemas( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_metadata_schemas_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_metadata_schemas( - metadata_service.ListMetadataSchemasRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_metadata_schemas_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_schemas), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = metadata_service.ListMetadataSchemasResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(metadata_service.ListMetadataSchemasResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_metadata_schemas( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_metadata_schemas_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_metadata_schemas( - metadata_service.ListMetadataSchemasRequest(), - parent='parent_value', - ) - - -def test_list_metadata_schemas_pager(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_schemas), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[ - metadata_schema.MetadataSchema(), - metadata_schema.MetadataSchema(), - metadata_schema.MetadataSchema(), - ], - next_page_token='abc', - ), - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[], - next_page_token='def', - ), - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[ - metadata_schema.MetadataSchema(), - ], - next_page_token='ghi', - ), - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[ - metadata_schema.MetadataSchema(), - metadata_schema.MetadataSchema(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_metadata_schemas(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, metadata_schema.MetadataSchema) - for i in results) - -def test_list_metadata_schemas_pages(): - client = MetadataServiceClient( - 
credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_schemas), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[ - metadata_schema.MetadataSchema(), - metadata_schema.MetadataSchema(), - metadata_schema.MetadataSchema(), - ], - next_page_token='abc', - ), - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[], - next_page_token='def', - ), - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[ - metadata_schema.MetadataSchema(), - ], - next_page_token='ghi', - ), - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[ - metadata_schema.MetadataSchema(), - metadata_schema.MetadataSchema(), - ], - ), - RuntimeError, - ) - pages = list(client.list_metadata_schemas(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_metadata_schemas_async_pager(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_schemas), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[ - metadata_schema.MetadataSchema(), - metadata_schema.MetadataSchema(), - metadata_schema.MetadataSchema(), - ], - next_page_token='abc', - ), - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[], - next_page_token='def', - ), - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[ - metadata_schema.MetadataSchema(), - ], - next_page_token='ghi', - ), - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[ - metadata_schema.MetadataSchema(), - metadata_schema.MetadataSchema(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_metadata_schemas(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, metadata_schema.MetadataSchema) - for i in responses) - -@pytest.mark.asyncio -async def test_list_metadata_schemas_async_pages(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_metadata_schemas), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[ - metadata_schema.MetadataSchema(), - metadata_schema.MetadataSchema(), - metadata_schema.MetadataSchema(), - ], - next_page_token='abc', - ), - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[], - next_page_token='def', - ), - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[ - metadata_schema.MetadataSchema(), - ], - next_page_token='ghi', - ), - metadata_service.ListMetadataSchemasResponse( - metadata_schemas=[ - metadata_schema.MetadataSchema(), - metadata_schema.MetadataSchema(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_metadata_schemas(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_query_artifact_lineage_subgraph(transport: str = 'grpc', request_type=metadata_service.QueryArtifactLineageSubgraphRequest): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_artifact_lineage_subgraph), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = lineage_subgraph.LineageSubgraph( - ) - response = client.query_artifact_lineage_subgraph(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, lineage_subgraph.LineageSubgraph) - - -def test_query_artifact_lineage_subgraph_from_dict(): - test_query_artifact_lineage_subgraph(request_type=dict) - - -def test_query_artifact_lineage_subgraph_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_artifact_lineage_subgraph), - '__call__') as call: - client.query_artifact_lineage_subgraph() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest() - - -@pytest.mark.asyncio -async def test_query_artifact_lineage_subgraph_async(transport: str = 'grpc_asyncio', request_type=metadata_service.QueryArtifactLineageSubgraphRequest): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_artifact_lineage_subgraph), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph( - )) - response = await client.query_artifact_lineage_subgraph(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == metadata_service.QueryArtifactLineageSubgraphRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, lineage_subgraph.LineageSubgraph) - - -@pytest.mark.asyncio -async def test_query_artifact_lineage_subgraph_async_from_dict(): - await test_query_artifact_lineage_subgraph_async(request_type=dict) - - -def test_query_artifact_lineage_subgraph_field_headers(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.QueryArtifactLineageSubgraphRequest() - - request.artifact = 'artifact/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_artifact_lineage_subgraph), - '__call__') as call: - call.return_value = lineage_subgraph.LineageSubgraph() - client.query_artifact_lineage_subgraph(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'artifact=artifact/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_query_artifact_lineage_subgraph_field_headers_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = metadata_service.QueryArtifactLineageSubgraphRequest() - - request.artifact = 'artifact/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.query_artifact_lineage_subgraph), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph()) - await client.query_artifact_lineage_subgraph(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'artifact=artifact/value', - ) in kw['metadata'] - - -def test_query_artifact_lineage_subgraph_flattened(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_artifact_lineage_subgraph), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = lineage_subgraph.LineageSubgraph() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.query_artifact_lineage_subgraph( - artifact='artifact_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].artifact - mock_val = 'artifact_value' - assert arg == mock_val - - -def test_query_artifact_lineage_subgraph_flattened_error(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.query_artifact_lineage_subgraph( - metadata_service.QueryArtifactLineageSubgraphRequest(), - artifact='artifact_value', - ) - - -@pytest.mark.asyncio -async def test_query_artifact_lineage_subgraph_flattened_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.query_artifact_lineage_subgraph), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = lineage_subgraph.LineageSubgraph() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(lineage_subgraph.LineageSubgraph()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.query_artifact_lineage_subgraph( - artifact='artifact_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].artifact - mock_val = 'artifact_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_query_artifact_lineage_subgraph_flattened_error_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.query_artifact_lineage_subgraph( - metadata_service.QueryArtifactLineageSubgraphRequest(), - artifact='artifact_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. 
- transport = transports.MetadataServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.MetadataServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = MetadataServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.MetadataServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = MetadataServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.MetadataServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = MetadataServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.MetadataServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.MetadataServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.MetadataServiceGrpcTransport, - transports.MetadataServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. 
- with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.MetadataServiceGrpcTransport, - ) - -def test_metadata_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.MetadataServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_metadata_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1beta1.services.metadata_service.transports.MetadataServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.MetadataServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
- methods = ( - 'create_metadata_store', - 'get_metadata_store', - 'list_metadata_stores', - 'delete_metadata_store', - 'create_artifact', - 'get_artifact', - 'list_artifacts', - 'update_artifact', - 'delete_artifact', - 'purge_artifacts', - 'create_context', - 'get_context', - 'list_contexts', - 'update_context', - 'delete_context', - 'purge_contexts', - 'add_context_artifacts_and_executions', - 'add_context_children', - 'query_context_lineage_subgraph', - 'create_execution', - 'get_execution', - 'list_executions', - 'update_execution', - 'delete_execution', - 'purge_executions', - 'add_execution_events', - 'query_execution_inputs_and_outputs', - 'create_metadata_schema', - 'get_metadata_schema', - 'list_metadata_schemas', - 'query_artifact_lineage_subgraph', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -def test_metadata_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.metadata_service.transports.MetadataServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.MetadataServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_metadata_service_base_transport_with_adc(): - # Test the default credentials are 
used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.metadata_service.transports.MetadataServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.MetadataServiceTransport() - adc.assert_called_once() - - -def test_metadata_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - MetadataServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.MetadataServiceGrpcTransport, - transports.MetadataServiceGrpcAsyncIOTransport, - ], -) -def test_metadata_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.MetadataServiceGrpcTransport, grpc_helpers), - (transports.MetadataServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_metadata_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.MetadataServiceGrpcTransport, transports.MetadataServiceGrpcAsyncIOTransport]) -def test_metadata_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. 
- with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_metadata_service_host_no_port(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_metadata_service_host_with_port(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_metadata_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.MetadataServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_metadata_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.MetadataServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.MetadataServiceGrpcTransport, transports.MetadataServiceGrpcAsyncIOTransport]) -def test_metadata_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from 
grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.MetadataServiceGrpcTransport, transports.MetadataServiceGrpcAsyncIOTransport]) -def test_metadata_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_metadata_service_grpc_lro_client(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_metadata_service_grpc_lro_async_client(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_artifact_path(): - project = "squid" - location = "clam" - metadata_store = "whelk" - artifact = "octopus" - expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, ) - actual = MetadataServiceClient.artifact_path(project, location, metadata_store, artifact) - assert expected == actual - - -def test_parse_artifact_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - "metadata_store": "cuttlefish", - "artifact": "mussel", - } - path = MetadataServiceClient.artifact_path(**expected) - - # Check that the path construction is reversible. - actual = MetadataServiceClient.parse_artifact_path(path) - assert expected == actual - -def test_context_path(): - project = "winkle" - location = "nautilus" - metadata_store = "scallop" - context = "abalone" - expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, ) - actual = MetadataServiceClient.context_path(project, location, metadata_store, context) - assert expected == actual - - -def test_parse_context_path(): - expected = { - "project": "squid", - "location": "clam", - "metadata_store": "whelk", - "context": "octopus", - } - path = MetadataServiceClient.context_path(**expected) - - # Check that the path construction is reversible. 
- actual = MetadataServiceClient.parse_context_path(path) - assert expected == actual - -def test_execution_path(): - project = "oyster" - location = "nudibranch" - metadata_store = "cuttlefish" - execution = "mussel" - expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, location=location, metadata_store=metadata_store, execution=execution, ) - actual = MetadataServiceClient.execution_path(project, location, metadata_store, execution) - assert expected == actual - - -def test_parse_execution_path(): - expected = { - "project": "winkle", - "location": "nautilus", - "metadata_store": "scallop", - "execution": "abalone", - } - path = MetadataServiceClient.execution_path(**expected) - - # Check that the path construction is reversible. - actual = MetadataServiceClient.parse_execution_path(path) - assert expected == actual - -def test_metadata_schema_path(): - project = "squid" - location = "clam" - metadata_store = "whelk" - metadata_schema = "octopus" - expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/metadataSchemas/{metadata_schema}".format(project=project, location=location, metadata_store=metadata_store, metadata_schema=metadata_schema, ) - actual = MetadataServiceClient.metadata_schema_path(project, location, metadata_store, metadata_schema) - assert expected == actual - - -def test_parse_metadata_schema_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - "metadata_store": "cuttlefish", - "metadata_schema": "mussel", - } - path = MetadataServiceClient.metadata_schema_path(**expected) - - # Check that the path construction is reversible. 
- actual = MetadataServiceClient.parse_metadata_schema_path(path) - assert expected == actual - -def test_metadata_store_path(): - project = "winkle" - location = "nautilus" - metadata_store = "scallop" - expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}".format(project=project, location=location, metadata_store=metadata_store, ) - actual = MetadataServiceClient.metadata_store_path(project, location, metadata_store) - assert expected == actual - - -def test_parse_metadata_store_path(): - expected = { - "project": "abalone", - "location": "squid", - "metadata_store": "clam", - } - path = MetadataServiceClient.metadata_store_path(**expected) - - # Check that the path construction is reversible. - actual = MetadataServiceClient.parse_metadata_store_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "whelk" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = MetadataServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "octopus", - } - path = MetadataServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = MetadataServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "oyster" - expected = "folders/{folder}".format(folder=folder, ) - actual = MetadataServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "nudibranch", - } - path = MetadataServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. 
- actual = MetadataServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "cuttlefish" - expected = "organizations/{organization}".format(organization=organization, ) - actual = MetadataServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "mussel", - } - path = MetadataServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = MetadataServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "winkle" - expected = "projects/{project}".format(project=project, ) - actual = MetadataServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "nautilus", - } - path = MetadataServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = MetadataServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "scallop" - location = "abalone" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = MetadataServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "squid", - "location": "clam", - } - path = MetadataServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = MetadataServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.MetadataServiceTransport, '_prep_wrapped_messages') as prep: - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.MetadataServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = MetadataServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = MetadataServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = MetadataServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py deleted file mode 100644 index acb06f2bb4..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_migration_service.py +++ /dev/null @@ -1,1748 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.migration_service import MigrationServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.migration_service import MigrationServiceClient -from google.cloud.aiplatform_v1beta1.services.migration_service import pagers -from google.cloud.aiplatform_v1beta1.services.migration_service import transports -from google.cloud.aiplatform_v1beta1.types import migratable_resource -from google.cloud.aiplatform_v1beta1.types import migration_service -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert MigrationServiceClient._get_default_mtls_endpoint(None) is None - assert MigrationServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert MigrationServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert MigrationServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert MigrationServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert MigrationServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - MigrationServiceClient, - MigrationServiceAsyncClient, -]) -def test_migration_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.MigrationServiceGrpcTransport, "grpc"), - (transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_migration_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as 
use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - MigrationServiceClient, - MigrationServiceAsyncClient, -]) -def test_migration_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_migration_service_client_get_transport_class(): - transport = MigrationServiceClient.get_transport_class() - available_transports = [ - transports.MigrationServiceGrpcTransport, - ] - assert transport in available_transports - - transport = MigrationServiceClient.get_transport_class("grpc") - assert transport == transports.MigrationServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"), - (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(MigrationServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceClient)) 
-@mock.patch.object(MigrationServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceAsyncClient)) -def test_migration_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(MigrationServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(MigrationServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc", "true"), - (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc", "false"), - (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(MigrationServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceClient)) -@mock.patch.object(MigrationServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MigrationServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_migration_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"), - (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_migration_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (MigrationServiceClient, transports.MigrationServiceGrpcTransport, "grpc"), - (MigrationServiceAsyncClient, transports.MigrationServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_migration_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_migration_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.migration_service.transports.MigrationServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = MigrationServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_search_migratable_resources(transport: str = 'grpc', request_type=migration_service.SearchMigratableResourcesRequest): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = migration_service.SearchMigratableResourcesResponse( - next_page_token='next_page_token_value', - ) - response = client.search_migratable_resources(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == migration_service.SearchMigratableResourcesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.SearchMigratableResourcesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_search_migratable_resources_from_dict(): - test_search_migratable_resources(request_type=dict) - - -def test_search_migratable_resources_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - client.search_migratable_resources() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == migration_service.SearchMigratableResourcesRequest() - - -@pytest.mark.asyncio -async def test_search_migratable_resources_async(transport: str = 'grpc_asyncio', request_type=migration_service.SearchMigratableResourcesRequest): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(migration_service.SearchMigratableResourcesResponse( - next_page_token='next_page_token_value', - )) - response = await client.search_migratable_resources(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == migration_service.SearchMigratableResourcesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.SearchMigratableResourcesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_search_migratable_resources_async_from_dict(): - await test_search_migratable_resources_async(request_type=dict) - - -def test_search_migratable_resources_field_headers(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = migration_service.SearchMigratableResourcesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - call.return_value = migration_service.SearchMigratableResourcesResponse() - client.search_migratable_resources(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_search_migratable_resources_field_headers_async(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = migration_service.SearchMigratableResourcesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_service.SearchMigratableResourcesResponse()) - await client.search_migratable_resources(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_search_migratable_resources_flattened(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = migration_service.SearchMigratableResourcesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.search_migratable_resources( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_search_migratable_resources_flattened_error(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.search_migratable_resources( - migration_service.SearchMigratableResourcesRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_search_migratable_resources_flattened_async(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = migration_service.SearchMigratableResourcesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(migration_service.SearchMigratableResourcesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.search_migratable_resources( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_search_migratable_resources_flattened_error_async(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.search_migratable_resources( - migration_service.SearchMigratableResourcesRequest(), - parent='parent_value', - ) - - -def test_search_migratable_resources_pager(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - ], - next_page_token='abc', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[], - next_page_token='def', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - ], - next_page_token='ghi', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.search_migratable_resources(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, migratable_resource.MigratableResource) - for i in results) - -def test_search_migratable_resources_pages(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - ], - next_page_token='abc', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[], - next_page_token='def', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - ], - next_page_token='ghi', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - ], - ), - RuntimeError, - ) - pages = list(client.search_migratable_resources(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_search_migratable_resources_async_pager(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - ], - next_page_token='abc', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[], - next_page_token='def', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - ], - next_page_token='ghi', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - ], - ), - RuntimeError, - ) - async_pager = await client.search_migratable_resources(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, migratable_resource.MigratableResource) - for i in responses) - -@pytest.mark.asyncio -async def test_search_migratable_resources_async_pages(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.search_migratable_resources), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - ], - next_page_token='abc', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[], - next_page_token='def', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - ], - next_page_token='ghi', - ), - migration_service.SearchMigratableResourcesResponse( - migratable_resources=[ - migratable_resource.MigratableResource(), - migratable_resource.MigratableResource(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.search_migratable_resources(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_batch_migrate_resources(transport: str = 'grpc', request_type=migration_service.BatchMigrateResourcesRequest): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_migrate_resources), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.batch_migrate_resources(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == migration_service.BatchMigrateResourcesRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_batch_migrate_resources_from_dict(): - test_batch_migrate_resources(request_type=dict) - - -def test_batch_migrate_resources_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_migrate_resources), - '__call__') as call: - client.batch_migrate_resources() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == migration_service.BatchMigrateResourcesRequest() - - -@pytest.mark.asyncio -async def test_batch_migrate_resources_async(transport: str = 'grpc_asyncio', request_type=migration_service.BatchMigrateResourcesRequest): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_migrate_resources), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.batch_migrate_resources(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == migration_service.BatchMigrateResourcesRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_batch_migrate_resources_async_from_dict(): - await test_batch_migrate_resources_async(request_type=dict) - - -def test_batch_migrate_resources_field_headers(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = migration_service.BatchMigrateResourcesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_migrate_resources), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.batch_migrate_resources(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_batch_migrate_resources_field_headers_async(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = migration_service.BatchMigrateResourcesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_migrate_resources), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.batch_migrate_resources(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_batch_migrate_resources_flattened(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_migrate_resources), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.batch_migrate_resources( - parent='parent_value', - migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].migrate_resource_requests - mock_val = [migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))] - assert arg == mock_val - - -def test_batch_migrate_resources_flattened_error(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.batch_migrate_resources( - migration_service.BatchMigrateResourcesRequest(), - parent='parent_value', - migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))], - ) - - -@pytest.mark.asyncio -async def test_batch_migrate_resources_flattened_async(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_migrate_resources), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.batch_migrate_resources( - parent='parent_value', - migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].migrate_resource_requests - mock_val = [migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))] - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_batch_migrate_resources_flattened_error_async(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.batch_migrate_resources( - migration_service.BatchMigrateResourcesRequest(), - parent='parent_value', - migrate_resource_requests=[migration_service.MigrateResourceRequest(migrate_ml_engine_model_version_config=migration_service.MigrateResourceRequest.MigrateMlEngineModelVersionConfig(endpoint='endpoint_value'))], - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.MigrationServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.MigrationServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = MigrationServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. 
- transport = transports.MigrationServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = MigrationServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.MigrationServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = MigrationServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.MigrationServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.MigrationServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.MigrationServiceGrpcTransport, - transports.MigrationServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. 
- client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.MigrationServiceGrpcTransport, - ) - -def test_migration_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.MigrationServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_migration_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1beta1.services.migration_service.transports.MigrationServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.MigrationServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'search_migratable_resources', - 'batch_migrate_resources', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -def test_migration_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.MigrationServiceTransport( - credentials_file="credentials.json", - 
quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_migration_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.migration_service.transports.MigrationServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.MigrationServiceTransport() - adc.assert_called_once() - - -def test_migration_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - MigrationServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.MigrationServiceGrpcTransport, - transports.MigrationServiceGrpcAsyncIOTransport, - ], -) -def test_migration_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.MigrationServiceGrpcTransport, grpc_helpers), - (transports.MigrationServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_migration_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.MigrationServiceGrpcTransport, transports.MigrationServiceGrpcAsyncIOTransport]) -def test_migration_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
- with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_migration_service_host_no_port(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_migration_service_host_with_port(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_migration_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.MigrationServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_migration_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.MigrationServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.MigrationServiceGrpcTransport, transports.MigrationServiceGrpcAsyncIOTransport]) -def test_migration_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - 
"mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.MigrationServiceGrpcTransport, transports.MigrationServiceGrpcAsyncIOTransport]) -def test_migration_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_migration_service_grpc_lro_client(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_migration_service_grpc_lro_async_client(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_annotated_dataset_path(): - project = "squid" - dataset = "clam" - annotated_dataset = "whelk" - expected = "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}".format(project=project, dataset=dataset, annotated_dataset=annotated_dataset, ) - actual = MigrationServiceClient.annotated_dataset_path(project, dataset, annotated_dataset) - assert expected == actual - - -def test_parse_annotated_dataset_path(): - expected = { - "project": "octopus", - "dataset": "oyster", - "annotated_dataset": "nudibranch", - } - path = MigrationServiceClient.annotated_dataset_path(**expected) - - # Check that the path construction is reversible. 
- actual = MigrationServiceClient.parse_annotated_dataset_path(path) - assert expected == actual - -def test_dataset_path(): - project = "cuttlefish" - location = "mussel" - dataset = "winkle" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) - assert expected == actual - - -def test_parse_dataset_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "dataset": "abalone", - } - path = MigrationServiceClient.dataset_path(**expected) - - # Check that the path construction is reversible. - actual = MigrationServiceClient.parse_dataset_path(path) - assert expected == actual - -def test_dataset_path(): - project = "squid" - dataset = "clam" - expected = "projects/{project}/datasets/{dataset}".format(project=project, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) - assert expected == actual - - -def test_parse_dataset_path(): - expected = { - "project": "whelk", - "dataset": "octopus", - } - path = MigrationServiceClient.dataset_path(**expected) - - # Check that the path construction is reversible. - actual = MigrationServiceClient.parse_dataset_path(path) - assert expected == actual - -def test_dataset_path(): - project = "oyster" - location = "nudibranch" - dataset = "cuttlefish" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(project=project, location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) - assert expected == actual - - -def test_parse_dataset_path(): - expected = { - "project": "mussel", - "location": "winkle", - "dataset": "nautilus", - } - path = MigrationServiceClient.dataset_path(**expected) - - # Check that the path construction is reversible. 
- actual = MigrationServiceClient.parse_dataset_path(path) - assert expected == actual - -def test_model_path(): - project = "scallop" - location = "abalone" - model = "squid" - expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - actual = MigrationServiceClient.model_path(project, location, model) - assert expected == actual - - -def test_parse_model_path(): - expected = { - "project": "clam", - "location": "whelk", - "model": "octopus", - } - path = MigrationServiceClient.model_path(**expected) - - # Check that the path construction is reversible. - actual = MigrationServiceClient.parse_model_path(path) - assert expected == actual - -def test_model_path(): - project = "oyster" - location = "nudibranch" - model = "cuttlefish" - expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - actual = MigrationServiceClient.model_path(project, location, model) - assert expected == actual - - -def test_parse_model_path(): - expected = { - "project": "mussel", - "location": "winkle", - "model": "nautilus", - } - path = MigrationServiceClient.model_path(**expected) - - # Check that the path construction is reversible. - actual = MigrationServiceClient.parse_model_path(path) - assert expected == actual - -def test_version_path(): - project = "scallop" - model = "abalone" - version = "squid" - expected = "projects/{project}/models/{model}/versions/{version}".format(project=project, model=model, version=version, ) - actual = MigrationServiceClient.version_path(project, model, version) - assert expected == actual - - -def test_parse_version_path(): - expected = { - "project": "clam", - "model": "whelk", - "version": "octopus", - } - path = MigrationServiceClient.version_path(**expected) - - # Check that the path construction is reversible. 
- actual = MigrationServiceClient.parse_version_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "oyster" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = MigrationServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "nudibranch", - } - path = MigrationServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = MigrationServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "cuttlefish" - expected = "folders/{folder}".format(folder=folder, ) - actual = MigrationServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "mussel", - } - path = MigrationServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = MigrationServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "winkle" - expected = "organizations/{organization}".format(organization=organization, ) - actual = MigrationServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nautilus", - } - path = MigrationServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. 
- actual = MigrationServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "scallop" - expected = "projects/{project}".format(project=project, ) - actual = MigrationServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "abalone", - } - path = MigrationServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = MigrationServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "squid" - location = "clam" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = MigrationServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "whelk", - "location": "octopus", - } - path = MigrationServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = MigrationServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.MigrationServiceTransport, '_prep_wrapped_messages') as prep: - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.MigrationServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = MigrationServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = MigrationServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = MigrationServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py deleted file mode 100644 index c5312baf65..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_model_service.py +++ /dev/null @@ -1,4088 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.model_service import ModelServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.model_service import ModelServiceClient -from google.cloud.aiplatform_v1beta1.services.model_service import pagers -from google.cloud.aiplatform_v1beta1.services.model_service import transports -from google.cloud.aiplatform_v1beta1.types import deployed_model_ref -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import env_var -from google.cloud.aiplatform_v1beta1.types import explanation -from google.cloud.aiplatform_v1beta1.types import explanation_metadata -from google.cloud.aiplatform_v1beta1.types import io -from google.cloud.aiplatform_v1beta1.types import model -from google.cloud.aiplatform_v1beta1.types import model as gca_model -from google.cloud.aiplatform_v1beta1.types import model_evaluation -from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice -from google.cloud.aiplatform_v1beta1.types import model_service -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from 
google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert ModelServiceClient._get_default_mtls_endpoint(None) is None - assert ModelServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert ModelServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert ModelServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert ModelServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert ModelServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - ModelServiceClient, - ModelServiceAsyncClient, -]) -def test_model_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - 
-@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.ModelServiceGrpcTransport, "grpc"), - (transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_model_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - ModelServiceClient, - ModelServiceAsyncClient, -]) -def test_model_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_model_service_client_get_transport_class(): - transport = ModelServiceClient.get_transport_class() - available_transports = [ - transports.ModelServiceGrpcTransport, - ] - assert transport in available_transports - - transport = ModelServiceClient.get_transport_class("grpc") - assert transport == transports.ModelServiceGrpcTransport - - 
-@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc"), - (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(ModelServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceClient)) -@mock.patch.object(ModelServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceAsyncClient)) -def test_model_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(ModelServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(ModelServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc", "true"), - (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc", "false"), - (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(ModelServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceClient)) -@mock.patch.object(ModelServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ModelServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_model_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc"), - (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_model_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (ModelServiceClient, transports.ModelServiceGrpcTransport, "grpc"), - (ModelServiceAsyncClient, transports.ModelServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_model_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_model_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.model_service.transports.ModelServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = ModelServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_upload_model(transport: str = 'grpc', request_type=model_service.UploadModelRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.upload_model(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.UploadModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_upload_model_from_dict(): - test_upload_model(request_type=dict) - - -def test_upload_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - client.upload_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.UploadModelRequest() - - -@pytest.mark.asyncio -async def test_upload_model_async(transport: str = 'grpc_asyncio', request_type=model_service.UploadModelRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.upload_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.UploadModelRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_upload_model_async_from_dict(): - await test_upload_model_async(request_type=dict) - - -def test_upload_model_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.UploadModelRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.upload_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_upload_model_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.UploadModelRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.upload_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_upload_model_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.upload_model( - parent='parent_value', - model=gca_model.Model(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].model - mock_val = gca_model.Model(name='name_value') - assert arg == mock_val - - -def test_upload_model_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.upload_model( - model_service.UploadModelRequest(), - parent='parent_value', - model=gca_model.Model(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_upload_model_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.upload_model), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.upload_model( - parent='parent_value', - model=gca_model.Model(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].model - mock_val = gca_model.Model(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_upload_model_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.upload_model( - model_service.UploadModelRequest(), - parent='parent_value', - model=gca_model.Model(name='name_value'), - ) - - -def test_get_model(transport: str = 'grpc', request_type=model_service.GetModelRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = model.Model( - name='name_value', - display_name='display_name_value', - description='description_value', - metadata_schema_uri='metadata_schema_uri_value', - training_pipeline='training_pipeline_value', - artifact_uri='artifact_uri_value', - supported_deployment_resources_types=[model.Model.DeploymentResourcesType.DEDICATED_RESOURCES], - supported_input_storage_formats=['supported_input_storage_formats_value'], - supported_output_storage_formats=['supported_output_storage_formats_value'], - etag='etag_value', - ) - response = client.get_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.GetModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, model.Model) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.training_pipeline == 'training_pipeline_value' - assert response.artifact_uri == 'artifact_uri_value' - assert response.supported_deployment_resources_types == [model.Model.DeploymentResourcesType.DEDICATED_RESOURCES] - assert response.supported_input_storage_formats == ['supported_input_storage_formats_value'] - assert response.supported_output_storage_formats == ['supported_output_storage_formats_value'] - assert response.etag == 'etag_value' - - -def test_get_model_from_dict(): - test_get_model(request_type=dict) - - -def test_get_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - client.get_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.GetModelRequest() - - -@pytest.mark.asyncio -async def test_get_model_async(transport: str = 'grpc_asyncio', request_type=model_service.GetModelRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model.Model( - name='name_value', - display_name='display_name_value', - description='description_value', - metadata_schema_uri='metadata_schema_uri_value', - training_pipeline='training_pipeline_value', - artifact_uri='artifact_uri_value', - supported_deployment_resources_types=[model.Model.DeploymentResourcesType.DEDICATED_RESOURCES], - supported_input_storage_formats=['supported_input_storage_formats_value'], - supported_output_storage_formats=['supported_output_storage_formats_value'], - etag='etag_value', - )) - response = await client.get_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.GetModelRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, model.Model) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.training_pipeline == 'training_pipeline_value' - assert response.artifact_uri == 'artifact_uri_value' - assert response.supported_deployment_resources_types == [model.Model.DeploymentResourcesType.DEDICATED_RESOURCES] - assert response.supported_input_storage_formats == ['supported_input_storage_formats_value'] - assert response.supported_output_storage_formats == ['supported_output_storage_formats_value'] - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_model_async_from_dict(): - await test_get_model_async(request_type=dict) - - -def test_get_model_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.GetModelRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - call.return_value = model.Model() - client.get_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_model_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = model_service.GetModelRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) - await client.get_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_model_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model.Model() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_model_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_model( - model_service.GetModelRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_model_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model.Model() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_model_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_model( - model_service.GetModelRequest(), - name='name_value', - ) - - -def test_list_models(transport: str = 'grpc', request_type=model_service.ListModelsRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = model_service.ListModelsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_models(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_models_from_dict(): - test_list_models(request_type=dict) - - -def test_list_models_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - client.list_models() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelsRequest() - - -@pytest.mark.asyncio -async def test_list_models_async(transport: str = 'grpc_asyncio', request_type=model_service.ListModelsRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_models(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_models_async_from_dict(): - await test_list_models_async(request_type=dict) - - -def test_list_models_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.ListModelsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - call.return_value = model_service.ListModelsResponse() - client.list_models(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_models_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = model_service.ListModelsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelsResponse()) - await client.list_models(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_models_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_service.ListModelsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_models( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_models_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_models( - model_service.ListModelsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_models_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_service.ListModelsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_models( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_models_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_models( - model_service.ListModelsRequest(), - parent='parent_value', - ) - - -def test_list_models_pager(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - model_service.ListModelsResponse( - models=[ - model.Model(), - model.Model(), - model.Model(), - ], - next_page_token='abc', - ), - model_service.ListModelsResponse( - models=[], - next_page_token='def', - ), - model_service.ListModelsResponse( - models=[ - model.Model(), - ], - next_page_token='ghi', - ), - model_service.ListModelsResponse( - models=[ - model.Model(), - model.Model(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_models(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, model.Model) - for i in results) - -def test_list_models_pages(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - model_service.ListModelsResponse( - models=[ - model.Model(), - model.Model(), - model.Model(), - ], - next_page_token='abc', - ), - model_service.ListModelsResponse( - models=[], - next_page_token='def', - ), - model_service.ListModelsResponse( - models=[ - model.Model(), - ], - next_page_token='ghi', - ), - model_service.ListModelsResponse( - models=[ - model.Model(), - model.Model(), - ], - ), - RuntimeError, - ) - pages = list(client.list_models(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_models_async_pager(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_models), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - model_service.ListModelsResponse( - models=[ - model.Model(), - model.Model(), - model.Model(), - ], - next_page_token='abc', - ), - model_service.ListModelsResponse( - models=[], - next_page_token='def', - ), - model_service.ListModelsResponse( - models=[ - model.Model(), - ], - next_page_token='ghi', - ), - model_service.ListModelsResponse( - models=[ - model.Model(), - model.Model(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_models(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, model.Model) - for i in responses) - -@pytest.mark.asyncio -async def test_list_models_async_pages(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_models), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - model_service.ListModelsResponse( - models=[ - model.Model(), - model.Model(), - model.Model(), - ], - next_page_token='abc', - ), - model_service.ListModelsResponse( - models=[], - next_page_token='def', - ), - model_service.ListModelsResponse( - models=[ - model.Model(), - ], - next_page_token='ghi', - ), - model_service.ListModelsResponse( - models=[ - model.Model(), - model.Model(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_models(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_update_model(transport: str = 'grpc', request_type=model_service.UpdateModelRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_model.Model( - name='name_value', - display_name='display_name_value', - description='description_value', - metadata_schema_uri='metadata_schema_uri_value', - training_pipeline='training_pipeline_value', - artifact_uri='artifact_uri_value', - supported_deployment_resources_types=[gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES], - supported_input_storage_formats=['supported_input_storage_formats_value'], - supported_output_storage_formats=['supported_output_storage_formats_value'], - etag='etag_value', - ) - response = client.update_model(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.UpdateModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_model.Model) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.training_pipeline == 'training_pipeline_value' - assert response.artifact_uri == 'artifact_uri_value' - assert response.supported_deployment_resources_types == [gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES] - assert response.supported_input_storage_formats == ['supported_input_storage_formats_value'] - assert response.supported_output_storage_formats == ['supported_output_storage_formats_value'] - assert response.etag == 'etag_value' - - -def test_update_model_from_dict(): - test_update_model(request_type=dict) - - -def test_update_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model), - '__call__') as call: - client.update_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.UpdateModelRequest() - - -@pytest.mark.asyncio -async def test_update_model_async(transport: str = 'grpc_asyncio', request_type=model_service.UpdateModelRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model( - name='name_value', - display_name='display_name_value', - description='description_value', - metadata_schema_uri='metadata_schema_uri_value', - training_pipeline='training_pipeline_value', - artifact_uri='artifact_uri_value', - supported_deployment_resources_types=[gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES], - supported_input_storage_formats=['supported_input_storage_formats_value'], - supported_output_storage_formats=['supported_output_storage_formats_value'], - etag='etag_value', - )) - response = await client.update_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.UpdateModelRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_model.Model) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.metadata_schema_uri == 'metadata_schema_uri_value' - assert response.training_pipeline == 'training_pipeline_value' - assert response.artifact_uri == 'artifact_uri_value' - assert response.supported_deployment_resources_types == [gca_model.Model.DeploymentResourcesType.DEDICATED_RESOURCES] - assert response.supported_input_storage_formats == ['supported_input_storage_formats_value'] - assert response.supported_output_storage_formats == ['supported_output_storage_formats_value'] - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_update_model_async_from_dict(): - await test_update_model_async(request_type=dict) - - -def test_update_model_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.UpdateModelRequest() - - request.model.name = 'model.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model), - '__call__') as call: - call.return_value = gca_model.Model() - client.update_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'model.name=model.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_model_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.UpdateModelRequest() - - request.model.name = 'model.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model()) - await client.update_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'model.name=model.name/value', - ) in kw['metadata'] - - -def test_update_model_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_model.Model() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_model( - model=gca_model.Model(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].model - mock_val = gca_model.Model(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_model_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_model( - model_service.UpdateModelRequest(), - model=gca_model.Model(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_model_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_model.Model() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_model( - model=gca_model.Model(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].model - mock_val = gca_model.Model(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_model_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_model( - model_service.UpdateModelRequest(), - model=gca_model.Model(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_delete_model(transport: str = 'grpc', request_type=model_service.DeleteModelRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.DeleteModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_model_from_dict(): - test_delete_model(request_type=dict) - - -def test_delete_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - client.delete_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.DeleteModelRequest() - - -@pytest.mark.asyncio -async def test_delete_model_async(transport: str = 'grpc_asyncio', request_type=model_service.DeleteModelRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.DeleteModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_model_async_from_dict(): - await test_delete_model_async(request_type=dict) - - -def test_delete_model_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = model_service.DeleteModelRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_model_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.DeleteModelRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_model_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_model_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_model( - model_service.DeleteModelRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_model_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_model( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_model_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_model( - model_service.DeleteModelRequest(), - name='name_value', - ) - - -def test_export_model(transport: str = 'grpc', request_type=model_service.ExportModelRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.export_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ExportModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_export_model_from_dict(): - test_export_model(request_type=dict) - - -def test_export_model_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - client.export_model() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ExportModelRequest() - - -@pytest.mark.asyncio -async def test_export_model_async(transport: str = 'grpc_asyncio', request_type=model_service.ExportModelRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.export_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ExportModelRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_export_model_async_from_dict(): - await test_export_model_async(request_type=dict) - - -def test_export_model_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.ExportModelRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.export_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_export_model_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.ExportModelRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.export_model(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_export_model_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.export_model( - name='name_value', - output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].output_config - mock_val = model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value') - assert arg == mock_val - - -def test_export_model_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.export_model( - model_service.ExportModelRequest(), - name='name_value', - output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'), - ) - - -@pytest.mark.asyncio -async def test_export_model_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_model), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.export_model( - name='name_value', - output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].output_config - mock_val = model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_export_model_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.export_model( - model_service.ExportModelRequest(), - name='name_value', - output_config=model_service.ExportModelRequest.OutputConfig(export_format_id='export_format_id_value'), - ) - - -def test_get_model_evaluation(transport: str = 'grpc', request_type=model_service.GetModelEvaluationRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_evaluation.ModelEvaluation( - name='name_value', - metrics_schema_uri='metrics_schema_uri_value', - slice_dimensions=['slice_dimensions_value'], - ) - response = client.get_model_evaluation(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.GetModelEvaluationRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, model_evaluation.ModelEvaluation) - assert response.name == 'name_value' - assert response.metrics_schema_uri == 'metrics_schema_uri_value' - assert response.slice_dimensions == ['slice_dimensions_value'] - - -def test_get_model_evaluation_from_dict(): - test_get_model_evaluation(request_type=dict) - - -def test_get_model_evaluation_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - client.get_model_evaluation() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.GetModelEvaluationRequest() - - -@pytest.mark.asyncio -async def test_get_model_evaluation_async(transport: str = 'grpc_asyncio', request_type=model_service.GetModelEvaluationRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation( - name='name_value', - metrics_schema_uri='metrics_schema_uri_value', - slice_dimensions=['slice_dimensions_value'], - )) - response = await client.get_model_evaluation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.GetModelEvaluationRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, model_evaluation.ModelEvaluation) - assert response.name == 'name_value' - assert response.metrics_schema_uri == 'metrics_schema_uri_value' - assert response.slice_dimensions == ['slice_dimensions_value'] - - -@pytest.mark.asyncio -async def test_get_model_evaluation_async_from_dict(): - await test_get_model_evaluation_async(request_type=dict) - - -def test_get_model_evaluation_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.GetModelEvaluationRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - call.return_value = model_evaluation.ModelEvaluation() - client.get_model_evaluation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_model_evaluation_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.GetModelEvaluationRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation()) - await client.get_model_evaluation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_model_evaluation_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_evaluation.ModelEvaluation() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_model_evaluation( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_model_evaluation_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_model_evaluation( - model_service.GetModelEvaluationRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_model_evaluation_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_evaluation.ModelEvaluation() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation.ModelEvaluation()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_model_evaluation( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_model_evaluation_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_model_evaluation( - model_service.GetModelEvaluationRequest(), - name='name_value', - ) - - -def test_list_model_evaluations(transport: str = 'grpc', request_type=model_service.ListModelEvaluationsRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_service.ListModelEvaluationsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_model_evaluations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelEvaluationsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelEvaluationsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_model_evaluations_from_dict(): - test_list_model_evaluations(request_type=dict) - - -def test_list_model_evaluations_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - client.list_model_evaluations() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelEvaluationsRequest() - - -@pytest.mark.asyncio -async def test_list_model_evaluations_async(transport: str = 'grpc_asyncio', request_type=model_service.ListModelEvaluationsRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_model_evaluations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelEvaluationsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelEvaluationsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_model_evaluations_async_from_dict(): - await test_list_model_evaluations_async(request_type=dict) - - -def test_list_model_evaluations_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = model_service.ListModelEvaluationsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - call.return_value = model_service.ListModelEvaluationsResponse() - client.list_model_evaluations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_model_evaluations_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.ListModelEvaluationsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationsResponse()) - await client.list_model_evaluations(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_model_evaluations_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_service.ListModelEvaluationsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_model_evaluations( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_model_evaluations_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_model_evaluations( - model_service.ListModelEvaluationsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_model_evaluations_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_service.ListModelEvaluationsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_model_evaluations( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_model_evaluations_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_model_evaluations( - model_service.ListModelEvaluationsRequest(), - parent='parent_value', - ) - - -def test_list_model_evaluations_pager(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[], - next_page_token='def', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_model_evaluations(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, model_evaluation.ModelEvaluation) - for i in results) - -def test_list_model_evaluations_pages(): - client = 
ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[], - next_page_token='def', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - ), - RuntimeError, - ) - pages = list(client.list_model_evaluations(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_model_evaluations_async_pager(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[], - next_page_token='def', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_model_evaluations(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, model_evaluation.ModelEvaluation) - for i in responses) - -@pytest.mark.asyncio -async def test_list_model_evaluations_async_pages(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluations), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[], - next_page_token='def', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationsResponse( - model_evaluations=[ - model_evaluation.ModelEvaluation(), - model_evaluation.ModelEvaluation(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_model_evaluations(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_get_model_evaluation_slice(transport: str = 'grpc', request_type=model_service.GetModelEvaluationSliceRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation_slice), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_evaluation_slice.ModelEvaluationSlice( - name='name_value', - metrics_schema_uri='metrics_schema_uri_value', - ) - response = client.get_model_evaluation_slice(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.GetModelEvaluationSliceRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, model_evaluation_slice.ModelEvaluationSlice) - assert response.name == 'name_value' - assert response.metrics_schema_uri == 'metrics_schema_uri_value' - - -def test_get_model_evaluation_slice_from_dict(): - test_get_model_evaluation_slice(request_type=dict) - - -def test_get_model_evaluation_slice_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation_slice), - '__call__') as call: - client.get_model_evaluation_slice() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.GetModelEvaluationSliceRequest() - - -@pytest.mark.asyncio -async def test_get_model_evaluation_slice_async(transport: str = 'grpc_asyncio', request_type=model_service.GetModelEvaluationSliceRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation_slice), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation_slice.ModelEvaluationSlice( - name='name_value', - metrics_schema_uri='metrics_schema_uri_value', - )) - response = await client.get_model_evaluation_slice(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.GetModelEvaluationSliceRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, model_evaluation_slice.ModelEvaluationSlice) - assert response.name == 'name_value' - assert response.metrics_schema_uri == 'metrics_schema_uri_value' - - -@pytest.mark.asyncio -async def test_get_model_evaluation_slice_async_from_dict(): - await test_get_model_evaluation_slice_async(request_type=dict) - - -def test_get_model_evaluation_slice_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.GetModelEvaluationSliceRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation_slice), - '__call__') as call: - call.return_value = model_evaluation_slice.ModelEvaluationSlice() - client.get_model_evaluation_slice(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_model_evaluation_slice_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.GetModelEvaluationSliceRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_model_evaluation_slice), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation_slice.ModelEvaluationSlice()) - await client.get_model_evaluation_slice(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_model_evaluation_slice_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation_slice), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_evaluation_slice.ModelEvaluationSlice() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_model_evaluation_slice( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_model_evaluation_slice_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_model_evaluation_slice( - model_service.GetModelEvaluationSliceRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_model_evaluation_slice_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_model_evaluation_slice), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_evaluation_slice.ModelEvaluationSlice() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_evaluation_slice.ModelEvaluationSlice()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_model_evaluation_slice( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_model_evaluation_slice_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_model_evaluation_slice( - model_service.GetModelEvaluationSliceRequest(), - name='name_value', - ) - - -def test_list_model_evaluation_slices(transport: str = 'grpc', request_type=model_service.ListModelEvaluationSlicesRequest): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_service.ListModelEvaluationSlicesResponse( - next_page_token='next_page_token_value', - ) - response = client.list_model_evaluation_slices(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelEvaluationSlicesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelEvaluationSlicesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_model_evaluation_slices_from_dict(): - test_list_model_evaluation_slices(request_type=dict) - - -def test_list_model_evaluation_slices_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - client.list_model_evaluation_slices() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelEvaluationSlicesRequest() - - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_async(transport: str = 'grpc_asyncio', request_type=model_service.ListModelEvaluationSlicesRequest): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationSlicesResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_model_evaluation_slices(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == model_service.ListModelEvaluationSlicesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListModelEvaluationSlicesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_async_from_dict(): - await test_list_model_evaluation_slices_async(request_type=dict) - - -def test_list_model_evaluation_slices_field_headers(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. 
Set these to a non-empty value. - request = model_service.ListModelEvaluationSlicesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - call.return_value = model_service.ListModelEvaluationSlicesResponse() - client.list_model_evaluation_slices(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_field_headers_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = model_service.ListModelEvaluationSlicesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationSlicesResponse()) - await client.list_model_evaluation_slices(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_model_evaluation_slices_flattened(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = model_service.ListModelEvaluationSlicesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_model_evaluation_slices( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_model_evaluation_slices_flattened_error(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_model_evaluation_slices( - model_service.ListModelEvaluationSlicesRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_flattened_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = model_service.ListModelEvaluationSlicesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model_service.ListModelEvaluationSlicesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_model_evaluation_slices( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_flattened_error_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_model_evaluation_slices( - model_service.ListModelEvaluationSlicesRequest(), - parent='parent_value', - ) - - -def test_list_model_evaluation_slices_pager(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[], - next_page_token='def', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_model_evaluation_slices(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, model_evaluation_slice.ModelEvaluationSlice) - for i in results) - -def test_list_model_evaluation_slices_pages(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[], - next_page_token='def', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - ), - RuntimeError, - ) - pages = list(client.list_model_evaluation_slices(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_async_pager(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[], - next_page_token='def', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_model_evaluation_slices(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, model_evaluation_slice.ModelEvaluationSlice) - for i in responses) - -@pytest.mark.asyncio -async def test_list_model_evaluation_slices_async_pages(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_model_evaluation_slices), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='abc', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[], - next_page_token='def', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - ], - next_page_token='ghi', - ), - model_service.ListModelEvaluationSlicesResponse( - model_evaluation_slices=[ - model_evaluation_slice.ModelEvaluationSlice(), - model_evaluation_slice.ModelEvaluationSlice(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_model_evaluation_slices(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.ModelServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.ModelServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = ModelServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. 
- transport = transports.ModelServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = ModelServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.ModelServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = ModelServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.ModelServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.ModelServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.ModelServiceGrpcTransport, - transports.ModelServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. 
- client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.ModelServiceGrpcTransport, - ) - -def test_model_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.ModelServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_model_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1beta1.services.model_service.transports.ModelServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.ModelServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'upload_model', - 'get_model', - 'list_models', - 'update_model', - 'delete_model', - 'export_model', - 'get_model_evaluation', - 'list_model_evaluations', - 'get_model_evaluation_slice', - 'list_model_evaluation_slices', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -def test_model_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.model_service.transports.ModelServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), 
None) - transport = transports.ModelServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_model_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.model_service.transports.ModelServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.ModelServiceTransport() - adc.assert_called_once() - - -def test_model_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - ModelServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.ModelServiceGrpcTransport, - transports.ModelServiceGrpcAsyncIOTransport, - ], -) -def test_model_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.ModelServiceGrpcTransport, grpc_helpers), - (transports.ModelServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_model_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport]) -def test_model_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
- with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_model_service_host_no_port(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_model_service_host_with_port(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_model_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.ModelServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_model_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.ModelServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport]) -def test_model_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, 
- credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.ModelServiceGrpcTransport, transports.ModelServiceGrpcAsyncIOTransport]) -def test_model_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_model_service_grpc_lro_client(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_model_service_grpc_lro_async_client(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_endpoint_path(): - project = "squid" - location = "clam" - endpoint = "whelk" - expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - actual = ModelServiceClient.endpoint_path(project, location, endpoint) - assert expected == actual - - -def test_parse_endpoint_path(): - expected = { - "project": "octopus", - "location": "oyster", - "endpoint": "nudibranch", - } - path = ModelServiceClient.endpoint_path(**expected) - - # Check that the path construction is reversible. - actual = ModelServiceClient.parse_endpoint_path(path) - assert expected == actual - -def test_model_path(): - project = "cuttlefish" - location = "mussel" - model = "winkle" - expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - actual = ModelServiceClient.model_path(project, location, model) - assert expected == actual - - -def test_parse_model_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "model": "abalone", - } - path = ModelServiceClient.model_path(**expected) - - # Check that the path construction is reversible. 
- actual = ModelServiceClient.parse_model_path(path) - assert expected == actual - -def test_model_evaluation_path(): - project = "squid" - location = "clam" - model = "whelk" - evaluation = "octopus" - expected = "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}".format(project=project, location=location, model=model, evaluation=evaluation, ) - actual = ModelServiceClient.model_evaluation_path(project, location, model, evaluation) - assert expected == actual - - -def test_parse_model_evaluation_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - "model": "cuttlefish", - "evaluation": "mussel", - } - path = ModelServiceClient.model_evaluation_path(**expected) - - # Check that the path construction is reversible. - actual = ModelServiceClient.parse_model_evaluation_path(path) - assert expected == actual - -def test_model_evaluation_slice_path(): - project = "winkle" - location = "nautilus" - model = "scallop" - evaluation = "abalone" - slice = "squid" - expected = "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}".format(project=project, location=location, model=model, evaluation=evaluation, slice=slice, ) - actual = ModelServiceClient.model_evaluation_slice_path(project, location, model, evaluation, slice) - assert expected == actual - - -def test_parse_model_evaluation_slice_path(): - expected = { - "project": "clam", - "location": "whelk", - "model": "octopus", - "evaluation": "oyster", - "slice": "nudibranch", - } - path = ModelServiceClient.model_evaluation_slice_path(**expected) - - # Check that the path construction is reversible. 
- actual = ModelServiceClient.parse_model_evaluation_slice_path(path) - assert expected == actual - -def test_training_pipeline_path(): - project = "cuttlefish" - location = "mussel" - training_pipeline = "winkle" - expected = "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, ) - actual = ModelServiceClient.training_pipeline_path(project, location, training_pipeline) - assert expected == actual - - -def test_parse_training_pipeline_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "training_pipeline": "abalone", - } - path = ModelServiceClient.training_pipeline_path(**expected) - - # Check that the path construction is reversible. - actual = ModelServiceClient.parse_training_pipeline_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "squid" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = ModelServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "clam", - } - path = ModelServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = ModelServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "whelk" - expected = "folders/{folder}".format(folder=folder, ) - actual = ModelServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "octopus", - } - path = ModelServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. 
- actual = ModelServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "oyster" - expected = "organizations/{organization}".format(organization=organization, ) - actual = ModelServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nudibranch", - } - path = ModelServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = ModelServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "cuttlefish" - expected = "projects/{project}".format(project=project, ) - actual = ModelServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "mussel", - } - path = ModelServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = ModelServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "winkle" - location = "nautilus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = ModelServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "scallop", - "location": "abalone", - } - path = ModelServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = ModelServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.ModelServiceTransport, '_prep_wrapped_messages') as prep: - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.ModelServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = ModelServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = ModelServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = ModelServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py deleted file mode 100644 index 951323594c..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_pipeline_service.py +++ /dev/null @@ -1,3953 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.pipeline_service import PipelineServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.pipeline_service import PipelineServiceClient -from google.cloud.aiplatform_v1beta1.services.pipeline_service import pagers -from google.cloud.aiplatform_v1beta1.services.pipeline_service import transports -from google.cloud.aiplatform_v1beta1.types import artifact -from google.cloud.aiplatform_v1beta1.types import context -from google.cloud.aiplatform_v1beta1.types import deployed_model_ref -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import env_var -from google.cloud.aiplatform_v1beta1.types import execution -from google.cloud.aiplatform_v1beta1.types import explanation -from google.cloud.aiplatform_v1beta1.types import explanation_metadata -from google.cloud.aiplatform_v1beta1.types import io -from google.cloud.aiplatform_v1beta1.types import model -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.cloud.aiplatform_v1beta1.types import pipeline_job -from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job -from google.cloud.aiplatform_v1beta1.types import pipeline_service -from 
google.cloud.aiplatform_v1beta1.types import pipeline_state -from google.cloud.aiplatform_v1beta1.types import training_pipeline -from google.cloud.aiplatform_v1beta1.types import training_pipeline as gca_training_pipeline -from google.cloud.aiplatform_v1beta1.types import value -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import any_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert PipelineServiceClient._get_default_mtls_endpoint(None) is None - assert PipelineServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert PipelineServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert PipelineServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert PipelineServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert PipelineServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", 
[ - PipelineServiceClient, - PipelineServiceAsyncClient, -]) -def test_pipeline_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.PipelineServiceGrpcTransport, "grpc"), - (transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_pipeline_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - PipelineServiceClient, - PipelineServiceAsyncClient, -]) -def test_pipeline_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - 
assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_pipeline_service_client_get_transport_class(): - transport = PipelineServiceClient.get_transport_class() - available_transports = [ - transports.PipelineServiceGrpcTransport, - ] - assert transport in available_transports - - transport = PipelineServiceClient.get_transport_class("grpc") - assert transport == transports.PipelineServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc"), - (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(PipelineServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceClient)) -@mock.patch.object(PipelineServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceAsyncClient)) -def test_pipeline_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(PipelineServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(PipelineServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. 
- options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc", "true"), - (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc", "false"), - (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(PipelineServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceClient)) -@mock.patch.object(PipelineServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PipelineServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_pipeline_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. 
Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc"), - (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_pipeline_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (PipelineServiceClient, transports.PipelineServiceGrpcTransport, "grpc"), - (PipelineServiceAsyncClient, transports.PipelineServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_pipeline_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_pipeline_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.pipeline_service.transports.PipelineServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = PipelineServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_create_training_pipeline(transport: str = 'grpc', request_type=pipeline_service.CreateTrainingPipelineRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = gca_training_pipeline.TrainingPipeline( - name='name_value', - display_name='display_name_value', - training_task_definition='training_task_definition_value', - state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, - ) - response = client.create_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CreateTrainingPipelineRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_training_pipeline.TrainingPipeline) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.training_task_definition == 'training_task_definition_value' - assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED - - -def test_create_training_pipeline_from_dict(): - test_create_training_pipeline(request_type=dict) - - -def test_create_training_pipeline_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_training_pipeline), - '__call__') as call: - client.create_training_pipeline() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CreateTrainingPipelineRequest() - - -@pytest.mark.asyncio -async def test_create_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CreateTrainingPipelineRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_training_pipeline.TrainingPipeline( - name='name_value', - display_name='display_name_value', - training_task_definition='training_task_definition_value', - state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, - )) - response = await client.create_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CreateTrainingPipelineRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_training_pipeline.TrainingPipeline) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.training_task_definition == 'training_task_definition_value' - assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED - - -@pytest.mark.asyncio -async def test_create_training_pipeline_async_from_dict(): - await test_create_training_pipeline_async(request_type=dict) - - -def test_create_training_pipeline_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.CreateTrainingPipelineRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_training_pipeline), - '__call__') as call: - call.return_value = gca_training_pipeline.TrainingPipeline() - client.create_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_training_pipeline_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.CreateTrainingPipelineRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_training_pipeline), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_training_pipeline.TrainingPipeline()) - await client.create_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_training_pipeline_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_training_pipeline.TrainingPipeline() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_training_pipeline( - parent='parent_value', - training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].training_pipeline - mock_val = gca_training_pipeline.TrainingPipeline(name='name_value') - assert arg == mock_val - - -def test_create_training_pipeline_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_training_pipeline( - pipeline_service.CreateTrainingPipelineRequest(), - parent='parent_value', - training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_training_pipeline_flattened_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_training_pipeline.TrainingPipeline() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_training_pipeline.TrainingPipeline()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_training_pipeline( - parent='parent_value', - training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].training_pipeline - mock_val = gca_training_pipeline.TrainingPipeline(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_training_pipeline_flattened_error_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_training_pipeline( - pipeline_service.CreateTrainingPipelineRequest(), - parent='parent_value', - training_pipeline=gca_training_pipeline.TrainingPipeline(name='name_value'), - ) - - -def test_get_training_pipeline(transport: str = 'grpc', request_type=pipeline_service.GetTrainingPipelineRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = training_pipeline.TrainingPipeline( - name='name_value', - display_name='display_name_value', - training_task_definition='training_task_definition_value', - state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, - ) - response = client.get_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.GetTrainingPipelineRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, training_pipeline.TrainingPipeline) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.training_task_definition == 'training_task_definition_value' - assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED - - -def test_get_training_pipeline_from_dict(): - test_get_training_pipeline(request_type=dict) - - -def test_get_training_pipeline_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_training_pipeline), - '__call__') as call: - client.get_training_pipeline() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.GetTrainingPipelineRequest() - - -@pytest.mark.asyncio -async def test_get_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.GetTrainingPipelineRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(training_pipeline.TrainingPipeline( - name='name_value', - display_name='display_name_value', - training_task_definition='training_task_definition_value', - state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, - )) - response = await client.get_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.GetTrainingPipelineRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, training_pipeline.TrainingPipeline) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.training_task_definition == 'training_task_definition_value' - assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED - - -@pytest.mark.asyncio -async def test_get_training_pipeline_async_from_dict(): - await test_get_training_pipeline_async(request_type=dict) - - -def test_get_training_pipeline_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.GetTrainingPipelineRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_training_pipeline), - '__call__') as call: - call.return_value = training_pipeline.TrainingPipeline() - client.get_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_training_pipeline_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.GetTrainingPipelineRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_training_pipeline), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(training_pipeline.TrainingPipeline()) - await client.get_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_training_pipeline_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = training_pipeline.TrainingPipeline() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_training_pipeline( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_training_pipeline_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_training_pipeline( - pipeline_service.GetTrainingPipelineRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_training_pipeline_flattened_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = training_pipeline.TrainingPipeline() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(training_pipeline.TrainingPipeline()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_training_pipeline( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_training_pipeline_flattened_error_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_training_pipeline( - pipeline_service.GetTrainingPipelineRequest(), - name='name_value', - ) - - -def test_list_training_pipelines(transport: str = 'grpc', request_type=pipeline_service.ListTrainingPipelinesRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = pipeline_service.ListTrainingPipelinesResponse( - next_page_token='next_page_token_value', - ) - response = client.list_training_pipelines(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.ListTrainingPipelinesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTrainingPipelinesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_training_pipelines_from_dict(): - test_list_training_pipelines(request_type=dict) - - -def test_list_training_pipelines_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__') as call: - client.list_training_pipelines() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.ListTrainingPipelinesRequest() - - -@pytest.mark.asyncio -async def test_list_training_pipelines_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.ListTrainingPipelinesRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListTrainingPipelinesResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_training_pipelines(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.ListTrainingPipelinesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTrainingPipelinesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_training_pipelines_async_from_dict(): - await test_list_training_pipelines_async(request_type=dict) - - -def test_list_training_pipelines_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.ListTrainingPipelinesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__') as call: - call.return_value = pipeline_service.ListTrainingPipelinesResponse() - client.list_training_pipelines(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_training_pipelines_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.ListTrainingPipelinesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListTrainingPipelinesResponse()) - await client.list_training_pipelines(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_training_pipelines_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = pipeline_service.ListTrainingPipelinesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_training_pipelines( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_training_pipelines_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_training_pipelines( - pipeline_service.ListTrainingPipelinesRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_training_pipelines_flattened_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = pipeline_service.ListTrainingPipelinesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListTrainingPipelinesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_training_pipelines( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_training_pipelines_flattened_error_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.list_training_pipelines( - pipeline_service.ListTrainingPipelinesRequest(), - parent='parent_value', - ) - - -def test_list_training_pipelines_pager(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - ], - next_page_token='abc', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[], - next_page_token='def', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - ], - next_page_token='ghi', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_training_pipelines(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, training_pipeline.TrainingPipeline) - for i in results) - -def test_list_training_pipelines_pages(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - ], - next_page_token='abc', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[], - next_page_token='def', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - ], - next_page_token='ghi', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - ], - ), - RuntimeError, - ) - pages = list(client.list_training_pipelines(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_training_pipelines_async_pager(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - ], - next_page_token='abc', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[], - next_page_token='def', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - ], - next_page_token='ghi', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_training_pipelines(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, training_pipeline.TrainingPipeline) - for i in responses) - -@pytest.mark.asyncio -async def test_list_training_pipelines_async_pages(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_training_pipelines), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - ], - next_page_token='abc', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[], - next_page_token='def', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - ], - next_page_token='ghi', - ), - pipeline_service.ListTrainingPipelinesResponse( - training_pipelines=[ - training_pipeline.TrainingPipeline(), - training_pipeline.TrainingPipeline(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_training_pipelines(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_training_pipeline(transport: str = 'grpc', request_type=pipeline_service.DeleteTrainingPipelineRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.DeleteTrainingPipelineRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_delete_training_pipeline_from_dict(): - test_delete_training_pipeline(request_type=dict) - - -def test_delete_training_pipeline_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_training_pipeline), - '__call__') as call: - client.delete_training_pipeline() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.DeleteTrainingPipelineRequest() - - -@pytest.mark.asyncio -async def test_delete_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.DeleteTrainingPipelineRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.DeleteTrainingPipelineRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_training_pipeline_async_from_dict(): - await test_delete_training_pipeline_async(request_type=dict) - - -def test_delete_training_pipeline_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.DeleteTrainingPipelineRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_training_pipeline), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_training_pipeline_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.DeleteTrainingPipelineRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_training_pipeline), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_training_pipeline_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_training_pipeline( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_training_pipeline_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_training_pipeline( - pipeline_service.DeleteTrainingPipelineRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_training_pipeline_flattened_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_training_pipeline( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_training_pipeline_flattened_error_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_training_pipeline( - pipeline_service.DeleteTrainingPipelineRequest(), - name='name_value', - ) - - -def test_cancel_training_pipeline(transport: str = 'grpc', request_type=pipeline_service.CancelTrainingPipelineRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.cancel_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CancelTrainingPipelineRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_cancel_training_pipeline_from_dict(): - test_cancel_training_pipeline(request_type=dict) - - -def test_cancel_training_pipeline_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_training_pipeline), - '__call__') as call: - client.cancel_training_pipeline() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CancelTrainingPipelineRequest() - - -@pytest.mark.asyncio -async def test_cancel_training_pipeline_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CancelTrainingPipelineRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.cancel_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CancelTrainingPipelineRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_cancel_training_pipeline_async_from_dict(): - await test_cancel_training_pipeline_async(request_type=dict) - - -def test_cancel_training_pipeline_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.CancelTrainingPipelineRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_training_pipeline), - '__call__') as call: - call.return_value = None - client.cancel_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_cancel_training_pipeline_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.CancelTrainingPipelineRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.cancel_training_pipeline), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.cancel_training_pipeline(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_cancel_training_pipeline_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.cancel_training_pipeline( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_cancel_training_pipeline_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.cancel_training_pipeline( - pipeline_service.CancelTrainingPipelineRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_cancel_training_pipeline_flattened_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.cancel_training_pipeline), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.cancel_training_pipeline( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_cancel_training_pipeline_flattened_error_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.cancel_training_pipeline( - pipeline_service.CancelTrainingPipelineRequest(), - name='name_value', - ) - - -def test_create_pipeline_job(transport: str = 'grpc', request_type=pipeline_service.CreatePipelineJobRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = gca_pipeline_job.PipelineJob( - name='name_value', - display_name='display_name_value', - state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, - service_account='service_account_value', - network='network_value', - ) - response = client.create_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CreatePipelineJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_pipeline_job.PipelineJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED - assert response.service_account == 'service_account_value' - assert response.network == 'network_value' - - -def test_create_pipeline_job_from_dict(): - test_create_pipeline_job(request_type=dict) - - -def test_create_pipeline_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_pipeline_job), - '__call__') as call: - client.create_pipeline_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CreatePipelineJobRequest() - - -@pytest.mark.asyncio -async def test_create_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CreatePipelineJobRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_pipeline_job.PipelineJob( - name='name_value', - display_name='display_name_value', - state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, - service_account='service_account_value', - network='network_value', - )) - response = await client.create_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CreatePipelineJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_pipeline_job.PipelineJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED - assert response.service_account == 'service_account_value' - assert response.network == 'network_value' - - -@pytest.mark.asyncio -async def test_create_pipeline_job_async_from_dict(): - await test_create_pipeline_job_async(request_type=dict) - - -def test_create_pipeline_job_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.CreatePipelineJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_pipeline_job), - '__call__') as call: - call.return_value = gca_pipeline_job.PipelineJob() - client.create_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_pipeline_job_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.CreatePipelineJobRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_pipeline_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_pipeline_job.PipelineJob()) - await client.create_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_pipeline_job_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_pipeline_job.PipelineJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_pipeline_job( - parent='parent_value', - pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'), - pipeline_job_id='pipeline_job_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].pipeline_job - mock_val = gca_pipeline_job.PipelineJob(name='name_value') - assert arg == mock_val - arg = args[0].pipeline_job_id - mock_val = 'pipeline_job_id_value' - assert arg == mock_val - - -def test_create_pipeline_job_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_pipeline_job( - pipeline_service.CreatePipelineJobRequest(), - parent='parent_value', - pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'), - pipeline_job_id='pipeline_job_id_value', - ) - - -@pytest.mark.asyncio -async def test_create_pipeline_job_flattened_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_pipeline_job.PipelineJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_pipeline_job.PipelineJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_pipeline_job( - parent='parent_value', - pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'), - pipeline_job_id='pipeline_job_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].pipeline_job - mock_val = gca_pipeline_job.PipelineJob(name='name_value') - assert arg == mock_val - arg = args[0].pipeline_job_id - mock_val = 'pipeline_job_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_pipeline_job_flattened_error_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_pipeline_job( - pipeline_service.CreatePipelineJobRequest(), - parent='parent_value', - pipeline_job=gca_pipeline_job.PipelineJob(name='name_value'), - pipeline_job_id='pipeline_job_id_value', - ) - - -def test_get_pipeline_job(transport: str = 'grpc', request_type=pipeline_service.GetPipelineJobRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = pipeline_job.PipelineJob( - name='name_value', - display_name='display_name_value', - state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, - service_account='service_account_value', - network='network_value', - ) - response = client.get_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.GetPipelineJobRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pipeline_job.PipelineJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED - assert response.service_account == 'service_account_value' - assert response.network == 'network_value' - - -def test_get_pipeline_job_from_dict(): - test_get_pipeline_job(request_type=dict) - - -def test_get_pipeline_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_pipeline_job), - '__call__') as call: - client.get_pipeline_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.GetPipelineJobRequest() - - -@pytest.mark.asyncio -async def test_get_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.GetPipelineJobRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(pipeline_job.PipelineJob( - name='name_value', - display_name='display_name_value', - state=pipeline_state.PipelineState.PIPELINE_STATE_QUEUED, - service_account='service_account_value', - network='network_value', - )) - response = await client.get_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.GetPipelineJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pipeline_job.PipelineJob) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == pipeline_state.PipelineState.PIPELINE_STATE_QUEUED - assert response.service_account == 'service_account_value' - assert response.network == 'network_value' - - -@pytest.mark.asyncio -async def test_get_pipeline_job_async_from_dict(): - await test_get_pipeline_job_async(request_type=dict) - - -def test_get_pipeline_job_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.GetPipelineJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_pipeline_job), - '__call__') as call: - call.return_value = pipeline_job.PipelineJob() - client.get_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_pipeline_job_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.GetPipelineJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_pipeline_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_job.PipelineJob()) - await client.get_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_pipeline_job_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = pipeline_job.PipelineJob() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_pipeline_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_pipeline_job_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_pipeline_job( - pipeline_service.GetPipelineJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_pipeline_job_flattened_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = pipeline_job.PipelineJob() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_job.PipelineJob()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_pipeline_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_pipeline_job_flattened_error_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_pipeline_job( - pipeline_service.GetPipelineJobRequest(), - name='name_value', - ) - - -def test_list_pipeline_jobs(transport: str = 'grpc', request_type=pipeline_service.ListPipelineJobsRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = pipeline_service.ListPipelineJobsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_pipeline_jobs(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.ListPipelineJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListPipelineJobsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_pipeline_jobs_from_dict(): - test_list_pipeline_jobs(request_type=dict) - - -def test_list_pipeline_jobs_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - client.list_pipeline_jobs() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.ListPipelineJobsRequest() - - -@pytest.mark.asyncio -async def test_list_pipeline_jobs_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.ListPipelineJobsRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListPipelineJobsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_pipeline_jobs(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.ListPipelineJobsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListPipelineJobsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_pipeline_jobs_async_from_dict(): - await test_list_pipeline_jobs_async(request_type=dict) - - -def test_list_pipeline_jobs_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.ListPipelineJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - call.return_value = pipeline_service.ListPipelineJobsResponse() - client.list_pipeline_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_pipeline_jobs_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.ListPipelineJobsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListPipelineJobsResponse()) - await client.list_pipeline_jobs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_pipeline_jobs_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = pipeline_service.ListPipelineJobsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_pipeline_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_pipeline_jobs_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_pipeline_jobs( - pipeline_service.ListPipelineJobsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_pipeline_jobs_flattened_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = pipeline_service.ListPipelineJobsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(pipeline_service.ListPipelineJobsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_pipeline_jobs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_pipeline_jobs_flattened_error_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_pipeline_jobs( - pipeline_service.ListPipelineJobsRequest(), - parent='parent_value', - ) - - -def test_list_pipeline_jobs_pager(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - ], - next_page_token='abc', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[], - next_page_token='def', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - ], - next_page_token='ghi', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_pipeline_jobs(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, pipeline_job.PipelineJob) - for i in results) - -def test_list_pipeline_jobs_pages(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - ], - next_page_token='abc', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[], - next_page_token='def', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - ], - next_page_token='ghi', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - ], - ), - RuntimeError, - ) - pages = list(client.list_pipeline_jobs(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_pipeline_jobs_async_pager(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - ], - next_page_token='abc', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[], - next_page_token='def', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - ], - next_page_token='ghi', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_pipeline_jobs(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, pipeline_job.PipelineJob) - for i in responses) - -@pytest.mark.asyncio -async def test_list_pipeline_jobs_async_pages(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_pipeline_jobs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - ], - next_page_token='abc', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[], - next_page_token='def', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - ], - next_page_token='ghi', - ), - pipeline_service.ListPipelineJobsResponse( - pipeline_jobs=[ - pipeline_job.PipelineJob(), - pipeline_job.PipelineJob(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_pipeline_jobs(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_pipeline_job(transport: str = 'grpc', request_type=pipeline_service.DeletePipelineJobRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.DeletePipelineJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_delete_pipeline_job_from_dict(): - test_delete_pipeline_job(request_type=dict) - - -def test_delete_pipeline_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_pipeline_job), - '__call__') as call: - client.delete_pipeline_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.DeletePipelineJobRequest() - - -@pytest.mark.asyncio -async def test_delete_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.DeletePipelineJobRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.DeletePipelineJobRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_pipeline_job_async_from_dict(): - await test_delete_pipeline_job_async(request_type=dict) - - -def test_delete_pipeline_job_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.DeletePipelineJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_pipeline_job), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_pipeline_job_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.DeletePipelineJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_pipeline_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_pipeline_job_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_pipeline_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_pipeline_job_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_pipeline_job( - pipeline_service.DeletePipelineJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_pipeline_job_flattened_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_pipeline_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_pipeline_job_flattened_error_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_pipeline_job( - pipeline_service.DeletePipelineJobRequest(), - name='name_value', - ) - - -def test_cancel_pipeline_job(transport: str = 'grpc', request_type=pipeline_service.CancelPipelineJobRequest): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.cancel_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CancelPipelineJobRequest() - - # Establish that the response is the type that we expect. 
- assert response is None - - -def test_cancel_pipeline_job_from_dict(): - test_cancel_pipeline_job(request_type=dict) - - -def test_cancel_pipeline_job_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_pipeline_job), - '__call__') as call: - client.cancel_pipeline_job() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CancelPipelineJobRequest() - - -@pytest.mark.asyncio -async def test_cancel_pipeline_job_async(transport: str = 'grpc_asyncio', request_type=pipeline_service.CancelPipelineJobRequest): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.cancel_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == pipeline_service.CancelPipelineJobRequest() - - # Establish that the response is the type that we expect. 
- assert response is None - - -@pytest.mark.asyncio -async def test_cancel_pipeline_job_async_from_dict(): - await test_cancel_pipeline_job_async(request_type=dict) - - -def test_cancel_pipeline_job_field_headers(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.CancelPipelineJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_pipeline_job), - '__call__') as call: - call.return_value = None - client.cancel_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_cancel_pipeline_job_field_headers_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = pipeline_service.CancelPipelineJobRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_pipeline_job), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.cancel_pipeline_job(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_cancel_pipeline_job_flattened(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.cancel_pipeline_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_cancel_pipeline_job_flattened_error(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.cancel_pipeline_job( - pipeline_service.CancelPipelineJobRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_cancel_pipeline_job_flattened_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_pipeline_job), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.cancel_pipeline_job( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_cancel_pipeline_job_flattened_error_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.cancel_pipeline_job( - pipeline_service.CancelPipelineJobRequest(), - name='name_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.PipelineServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.PipelineServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PipelineServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.PipelineServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PipelineServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. 
- transport = transports.PipelineServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = PipelineServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.PipelineServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.PipelineServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.PipelineServiceGrpcTransport, - transports.PipelineServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.PipelineServiceGrpcTransport, - ) - -def test_pipeline_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.PipelineServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_pipeline_service_base_transport(): - # Instantiate the base transport. 
- with mock.patch('google.cloud.aiplatform_v1beta1.services.pipeline_service.transports.PipelineServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.PipelineServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'create_training_pipeline', - 'get_training_pipeline', - 'list_training_pipelines', - 'delete_training_pipeline', - 'cancel_training_pipeline', - 'create_pipeline_job', - 'get_pipeline_job', - 'list_pipeline_jobs', - 'delete_pipeline_job', - 'cancel_pipeline_job', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -def test_pipeline_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.pipeline_service.transports.PipelineServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.PipelineServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_pipeline_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.pipeline_service.transports.PipelineServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.PipelineServiceTransport() - adc.assert_called_once() - - -def test_pipeline_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - PipelineServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.PipelineServiceGrpcTransport, - transports.PipelineServiceGrpcAsyncIOTransport, - ], -) -def test_pipeline_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.PipelineServiceGrpcTransport, grpc_helpers), - (transports.PipelineServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_pipeline_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.PipelineServiceGrpcTransport, transports.PipelineServiceGrpcAsyncIOTransport]) -def test_pipeline_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. 
- with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_pipeline_service_host_no_port(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_pipeline_service_host_with_port(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_pipeline_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.PipelineServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_pipeline_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.PipelineServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.PipelineServiceGrpcTransport, transports.PipelineServiceGrpcAsyncIOTransport]) -def test_pipeline_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from 
grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.PipelineServiceGrpcTransport, transports.PipelineServiceGrpcAsyncIOTransport]) -def test_pipeline_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_pipeline_service_grpc_lro_client(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_pipeline_service_grpc_lro_async_client(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_artifact_path(): - project = "squid" - location = "clam" - metadata_store = "whelk" - artifact = "octopus" - expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}".format(project=project, location=location, metadata_store=metadata_store, artifact=artifact, ) - actual = PipelineServiceClient.artifact_path(project, location, metadata_store, artifact) - assert expected == actual - - -def test_parse_artifact_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - "metadata_store": "cuttlefish", - "artifact": "mussel", - } - path = PipelineServiceClient.artifact_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_artifact_path(path) - assert expected == actual - -def test_context_path(): - project = "winkle" - location = "nautilus" - metadata_store = "scallop" - context = "abalone" - expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(project=project, location=location, metadata_store=metadata_store, context=context, ) - actual = PipelineServiceClient.context_path(project, location, metadata_store, context) - assert expected == actual - - -def test_parse_context_path(): - expected = { - "project": "squid", - "location": "clam", - "metadata_store": "whelk", - "context": "octopus", - } - path = PipelineServiceClient.context_path(**expected) - - # Check that the path construction is reversible. 
- actual = PipelineServiceClient.parse_context_path(path) - assert expected == actual - -def test_custom_job_path(): - project = "oyster" - location = "nudibranch" - custom_job = "cuttlefish" - expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) - actual = PipelineServiceClient.custom_job_path(project, location, custom_job) - assert expected == actual - - -def test_parse_custom_job_path(): - expected = { - "project": "mussel", - "location": "winkle", - "custom_job": "nautilus", - } - path = PipelineServiceClient.custom_job_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_custom_job_path(path) - assert expected == actual - -def test_endpoint_path(): - project = "scallop" - location = "abalone" - endpoint = "squid" - expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - actual = PipelineServiceClient.endpoint_path(project, location, endpoint) - assert expected == actual - - -def test_parse_endpoint_path(): - expected = { - "project": "clam", - "location": "whelk", - "endpoint": "octopus", - } - path = PipelineServiceClient.endpoint_path(**expected) - - # Check that the path construction is reversible. 
- actual = PipelineServiceClient.parse_endpoint_path(path) - assert expected == actual - -def test_execution_path(): - project = "oyster" - location = "nudibranch" - metadata_store = "cuttlefish" - execution = "mussel" - expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/executions/{execution}".format(project=project, location=location, metadata_store=metadata_store, execution=execution, ) - actual = PipelineServiceClient.execution_path(project, location, metadata_store, execution) - assert expected == actual - - -def test_parse_execution_path(): - expected = { - "project": "winkle", - "location": "nautilus", - "metadata_store": "scallop", - "execution": "abalone", - } - path = PipelineServiceClient.execution_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_execution_path(path) - assert expected == actual - -def test_model_path(): - project = "squid" - location = "clam" - model = "whelk" - expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - actual = PipelineServiceClient.model_path(project, location, model) - assert expected == actual - - -def test_parse_model_path(): - expected = { - "project": "octopus", - "location": "oyster", - "model": "nudibranch", - } - path = PipelineServiceClient.model_path(**expected) - - # Check that the path construction is reversible. 
- actual = PipelineServiceClient.parse_model_path(path) - assert expected == actual - -def test_network_path(): - project = "cuttlefish" - network = "mussel" - expected = "projects/{project}/global/networks/{network}".format(project=project, network=network, ) - actual = PipelineServiceClient.network_path(project, network) - assert expected == actual - - -def test_parse_network_path(): - expected = { - "project": "winkle", - "network": "nautilus", - } - path = PipelineServiceClient.network_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_network_path(path) - assert expected == actual - -def test_pipeline_job_path(): - project = "scallop" - location = "abalone" - pipeline_job = "squid" - expected = "projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}".format(project=project, location=location, pipeline_job=pipeline_job, ) - actual = PipelineServiceClient.pipeline_job_path(project, location, pipeline_job) - assert expected == actual - - -def test_parse_pipeline_job_path(): - expected = { - "project": "clam", - "location": "whelk", - "pipeline_job": "octopus", - } - path = PipelineServiceClient.pipeline_job_path(**expected) - - # Check that the path construction is reversible. 
- actual = PipelineServiceClient.parse_pipeline_job_path(path) - assert expected == actual - -def test_training_pipeline_path(): - project = "oyster" - location = "nudibranch" - training_pipeline = "cuttlefish" - expected = "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}".format(project=project, location=location, training_pipeline=training_pipeline, ) - actual = PipelineServiceClient.training_pipeline_path(project, location, training_pipeline) - assert expected == actual - - -def test_parse_training_pipeline_path(): - expected = { - "project": "mussel", - "location": "winkle", - "training_pipeline": "nautilus", - } - path = PipelineServiceClient.training_pipeline_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_training_pipeline_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "scallop" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = PipelineServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "abalone", - } - path = PipelineServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "squid" - expected = "folders/{folder}".format(folder=folder, ) - actual = PipelineServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "clam", - } - path = PipelineServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. 
- actual = PipelineServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "whelk" - expected = "organizations/{organization}".format(organization=organization, ) - actual = PipelineServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "octopus", - } - path = PipelineServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "oyster" - expected = "projects/{project}".format(project=project, ) - actual = PipelineServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "nudibranch", - } - path = PipelineServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = PipelineServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "cuttlefish" - location = "mussel" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = PipelineServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "winkle", - "location": "nautilus", - } - path = PipelineServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = PipelineServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.PipelineServiceTransport, '_prep_wrapped_messages') as prep: - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.PipelineServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = PipelineServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = PipelineServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = PipelineServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py deleted file mode 100644 index fff060a260..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py +++ /dev/null @@ -1,1735 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api import httpbody_pb2 # type: ignore -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.prediction_service import PredictionServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.prediction_service import PredictionServiceClient -from google.cloud.aiplatform_v1beta1.services.prediction_service import transports -from google.cloud.aiplatform_v1beta1.types import explanation -from google.cloud.aiplatform_v1beta1.types import io -from google.cloud.aiplatform_v1beta1.types import prediction_service -from google.oauth2 import service_account -from google.protobuf import any_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert PredictionServiceClient._get_default_mtls_endpoint(None) is None - assert PredictionServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert PredictionServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert PredictionServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert PredictionServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert PredictionServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - PredictionServiceClient, - PredictionServiceAsyncClient, -]) -def test_prediction_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.PredictionServiceGrpcTransport, "grpc"), - (transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_prediction_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', 
create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - PredictionServiceClient, - PredictionServiceAsyncClient, -]) -def test_prediction_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_prediction_service_client_get_transport_class(): - transport = PredictionServiceClient.get_transport_class() - available_transports = [ - transports.PredictionServiceGrpcTransport, - ] - assert transport in available_transports - - transport = PredictionServiceClient.get_transport_class("grpc") - assert transport == transports.PredictionServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) 
-@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) -def test_prediction_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(PredictionServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(PredictionServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", "true"), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc", "false"), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(PredictionServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceClient)) -@mock.patch.object(PredictionServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PredictionServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_prediction_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_prediction_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"), - (PredictionServiceAsyncClient, transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_prediction_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_prediction_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.prediction_service.transports.PredictionServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = PredictionServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_predict(transport: str = 'grpc', request_type=prediction_service.PredictRequest): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = prediction_service.PredictResponse( - deployed_model_id='deployed_model_id_value', - model='model_value', - model_display_name='model_display_name_value', - ) - response = client.predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.PredictRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, prediction_service.PredictResponse) - assert response.deployed_model_id == 'deployed_model_id_value' - assert response.model == 'model_value' - assert response.model_display_name == 'model_display_name_value' - - -def test_predict_from_dict(): - test_predict(request_type=dict) - - -def test_predict_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - client.predict() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.PredictRequest() - - -@pytest.mark.asyncio -async def test_predict_async(transport: str = 'grpc_asyncio', request_type=prediction_service.PredictRequest): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse( - deployed_model_id='deployed_model_id_value', - model='model_value', - model_display_name='model_display_name_value', - )) - response = await client.predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.PredictRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, prediction_service.PredictResponse) - assert response.deployed_model_id == 'deployed_model_id_value' - assert response.model == 'model_value' - assert response.model_display_name == 'model_display_name_value' - - -@pytest.mark.asyncio -async def test_predict_async_from_dict(): - await test_predict_async(request_type=dict) - - -def test_predict_field_headers(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = prediction_service.PredictRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - call.return_value = prediction_service.PredictResponse() - client.predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_predict_field_headers_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = prediction_service.PredictRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse()) - await client.predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -def test_predict_flattened(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = prediction_service.PredictResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.predict( - endpoint='endpoint_value', - instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], - parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].endpoint - mock_val = 'endpoint_value' - assert arg == mock_val - arg = args[0].instances - mock_val = [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)] - assert arg == mock_val - arg = args[0].parameters - mock_val = struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE) - from proto.marshal import Marshal - from proto.marshal.rules.struct import ValueRule - rule = ValueRule(marshal=Marshal(name="Test")) - mock_val = rule.to_python(mock_val) - assert arg == mock_val - - -def test_predict_flattened_error(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.predict( - prediction_service.PredictRequest(), - endpoint='endpoint_value', - instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], - parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), - ) - - -@pytest.mark.asyncio -async def test_predict_flattened_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = prediction_service.PredictResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.PredictResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.predict( - endpoint='endpoint_value', - instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], - parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].endpoint - mock_val = 'endpoint_value' - assert arg == mock_val - arg = args[0].instances - mock_val = [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)] - assert arg == mock_val - arg = args[0].parameters - mock_val = struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE) - from proto.marshal import Marshal - from proto.marshal.rules.struct import ValueRule - rule = ValueRule(marshal=Marshal(name="Test")) - mock_val = rule.to_python(mock_val) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_predict_flattened_error_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.predict( - prediction_service.PredictRequest(), - endpoint='endpoint_value', - instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], - parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), - ) - - -def test_raw_predict(transport: str = 'grpc', request_type=prediction_service.RawPredictRequest): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.raw_predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = httpbody_pb2.HttpBody( - content_type='content_type_value', - data=b'data_blob', - ) - response = client.raw_predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.RawPredictRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, httpbody_pb2.HttpBody) - assert response.content_type == 'content_type_value' - assert response.data == b'data_blob' - - -def test_raw_predict_from_dict(): - test_raw_predict(request_type=dict) - - -def test_raw_predict_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.raw_predict), - '__call__') as call: - client.raw_predict() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.RawPredictRequest() - - -@pytest.mark.asyncio -async def test_raw_predict_async(transport: str = 'grpc_asyncio', request_type=prediction_service.RawPredictRequest): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.raw_predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(httpbody_pb2.HttpBody( - content_type='content_type_value', - data=b'data_blob', - )) - response = await client.raw_predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.RawPredictRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, httpbody_pb2.HttpBody) - assert response.content_type == 'content_type_value' - assert response.data == b'data_blob' - - -@pytest.mark.asyncio -async def test_raw_predict_async_from_dict(): - await test_raw_predict_async(request_type=dict) - - -def test_raw_predict_field_headers(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = prediction_service.RawPredictRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.raw_predict), - '__call__') as call: - call.return_value = httpbody_pb2.HttpBody() - client.raw_predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_raw_predict_field_headers_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = prediction_service.RawPredictRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.raw_predict), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(httpbody_pb2.HttpBody()) - await client.raw_predict(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -def test_raw_predict_flattened(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.raw_predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = httpbody_pb2.HttpBody() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.raw_predict( - endpoint='endpoint_value', - http_body=httpbody_pb2.HttpBody(content_type='content_type_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].endpoint - mock_val = 'endpoint_value' - assert arg == mock_val - arg = args[0].http_body - mock_val = httpbody_pb2.HttpBody(content_type='content_type_value') - assert arg == mock_val - - -def test_raw_predict_flattened_error(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.raw_predict( - prediction_service.RawPredictRequest(), - endpoint='endpoint_value', - http_body=httpbody_pb2.HttpBody(content_type='content_type_value'), - ) - - -@pytest.mark.asyncio -async def test_raw_predict_flattened_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.raw_predict), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = httpbody_pb2.HttpBody() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(httpbody_pb2.HttpBody()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.raw_predict( - endpoint='endpoint_value', - http_body=httpbody_pb2.HttpBody(content_type='content_type_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].endpoint - mock_val = 'endpoint_value' - assert arg == mock_val - arg = args[0].http_body - mock_val = httpbody_pb2.HttpBody(content_type='content_type_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_raw_predict_flattened_error_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.raw_predict( - prediction_service.RawPredictRequest(), - endpoint='endpoint_value', - http_body=httpbody_pb2.HttpBody(content_type='content_type_value'), - ) - - -def test_explain(transport: str = 'grpc', request_type=prediction_service.ExplainRequest): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.explain), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = prediction_service.ExplainResponse( - deployed_model_id='deployed_model_id_value', - ) - response = client.explain(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.ExplainRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, prediction_service.ExplainResponse) - assert response.deployed_model_id == 'deployed_model_id_value' - - -def test_explain_from_dict(): - test_explain(request_type=dict) - - -def test_explain_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.explain), - '__call__') as call: - client.explain() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.ExplainRequest() - - -@pytest.mark.asyncio -async def test_explain_async(transport: str = 'grpc_asyncio', request_type=prediction_service.ExplainRequest): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.explain), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.ExplainResponse( - deployed_model_id='deployed_model_id_value', - )) - response = await client.explain(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == prediction_service.ExplainRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, prediction_service.ExplainResponse) - assert response.deployed_model_id == 'deployed_model_id_value' - - -@pytest.mark.asyncio -async def test_explain_async_from_dict(): - await test_explain_async(request_type=dict) - - -def test_explain_field_headers(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = prediction_service.ExplainRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.explain), - '__call__') as call: - call.return_value = prediction_service.ExplainResponse() - client.explain(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_explain_field_headers_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = prediction_service.ExplainRequest() - - request.endpoint = 'endpoint/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.explain), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.ExplainResponse()) - await client.explain(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'endpoint=endpoint/value', - ) in kw['metadata'] - - -def test_explain_flattened(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.explain), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = prediction_service.ExplainResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.explain( - endpoint='endpoint_value', - instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], - parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), - deployed_model_id='deployed_model_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].endpoint - mock_val = 'endpoint_value' - assert arg == mock_val - arg = args[0].instances - mock_val = [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)] - assert arg == mock_val - arg = args[0].parameters - mock_val = struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE) - from proto.marshal import Marshal - from proto.marshal.rules.struct import ValueRule - rule = ValueRule(marshal=Marshal(name="Test")) - mock_val = rule.to_python(mock_val) - assert arg == mock_val - arg = args[0].deployed_model_id - mock_val = 'deployed_model_id_value' - assert arg == mock_val - - -def test_explain_flattened_error(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.explain( - prediction_service.ExplainRequest(), - endpoint='endpoint_value', - instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], - parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), - deployed_model_id='deployed_model_id_value', - ) - - -@pytest.mark.asyncio -async def test_explain_flattened_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.explain), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = prediction_service.ExplainResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(prediction_service.ExplainResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.explain( - endpoint='endpoint_value', - instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], - parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), - deployed_model_id='deployed_model_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].endpoint - mock_val = 'endpoint_value' - assert arg == mock_val - arg = args[0].instances - mock_val = [struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)] - assert arg == mock_val - arg = args[0].parameters - mock_val = struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE) - from proto.marshal import Marshal - from proto.marshal.rules.struct import ValueRule - rule = ValueRule(marshal=Marshal(name="Test")) - mock_val = rule.to_python(mock_val) - assert arg == mock_val - arg = args[0].deployed_model_id - mock_val = 'deployed_model_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_explain_flattened_error_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.explain( - prediction_service.ExplainRequest(), - endpoint='endpoint_value', - instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)], - parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE), - deployed_model_id='deployed_model_id_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. 
- transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PredictionServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = PredictionServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = PredictionServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.PredictionServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.PredictionServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.PredictionServiceGrpcTransport, - transports.PredictionServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. 
- with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.PredictionServiceGrpcTransport, - ) - -def test_prediction_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.PredictionServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_prediction_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1beta1.services.prediction_service.transports.PredictionServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.PredictionServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
- methods = ( - 'predict', - 'raw_predict', - 'explain', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - -def test_prediction_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.PredictionServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_prediction_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.PredictionServiceTransport() - adc.assert_called_once() - - -def test_prediction_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - PredictionServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.PredictionServiceGrpcTransport, - transports.PredictionServiceGrpcAsyncIOTransport, - ], -) -def test_prediction_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.PredictionServiceGrpcTransport, grpc_helpers), - (transports.PredictionServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_prediction_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) -def test_prediction_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. 
- with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_prediction_service_host_no_port(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_prediction_service_host_with_port(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_prediction_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.PredictionServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_prediction_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.PredictionServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) -def test_prediction_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed 
from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.PredictionServiceGrpcTransport, transports.PredictionServiceGrpcAsyncIOTransport]) -def test_prediction_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_endpoint_path(): - project = "squid" - location = "clam" - endpoint = "whelk" - expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(project=project, location=location, endpoint=endpoint, ) - actual = PredictionServiceClient.endpoint_path(project, location, endpoint) - assert expected == actual - - -def test_parse_endpoint_path(): - expected = { - "project": "octopus", - "location": "oyster", - "endpoint": "nudibranch", - } - path = PredictionServiceClient.endpoint_path(**expected) - - # Check that the path construction is reversible. 
- actual = PredictionServiceClient.parse_endpoint_path(path) - assert expected == actual - -def test_model_path(): - project = "cuttlefish" - location = "mussel" - model = "winkle" - expected = "projects/{project}/locations/{location}/models/{model}".format(project=project, location=location, model=model, ) - actual = PredictionServiceClient.model_path(project, location, model) - assert expected == actual - - -def test_parse_model_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "model": "abalone", - } - path = PredictionServiceClient.model_path(**expected) - - # Check that the path construction is reversible. - actual = PredictionServiceClient.parse_model_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "squid" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = PredictionServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "clam", - } - path = PredictionServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = PredictionServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "whelk" - expected = "folders/{folder}".format(folder=folder, ) - actual = PredictionServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "octopus", - } - path = PredictionServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. 
- actual = PredictionServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "oyster" - expected = "organizations/{organization}".format(organization=organization, ) - actual = PredictionServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nudibranch", - } - path = PredictionServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = PredictionServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "cuttlefish" - expected = "projects/{project}".format(project=project, ) - actual = PredictionServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "mussel", - } - path = PredictionServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = PredictionServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "winkle" - location = "nautilus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = PredictionServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "scallop", - "location": "abalone", - } - path = PredictionServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = PredictionServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.PredictionServiceTransport, '_prep_wrapped_messages') as prep: - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.PredictionServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = PredictionServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = PredictionServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = PredictionServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py deleted file mode 100644 index e86d2aef3c..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_specialist_pool_service.py +++ /dev/null @@ -1,2361 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import SpecialistPoolServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import SpecialistPoolServiceClient -from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import pagers -from google.cloud.aiplatform_v1beta1.services.specialist_pool_service import transports -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.cloud.aiplatform_v1beta1.types import specialist_pool -from google.cloud.aiplatform_v1beta1.types import specialist_pool as gca_specialist_pool -from google.cloud.aiplatform_v1beta1.types import specialist_pool_service -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert SpecialistPoolServiceClient._get_default_mtls_endpoint(None) is None - assert SpecialistPoolServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert SpecialistPoolServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert SpecialistPoolServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert SpecialistPoolServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert SpecialistPoolServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - SpecialistPoolServiceClient, - SpecialistPoolServiceAsyncClient, -]) -def test_specialist_pool_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.SpecialistPoolServiceGrpcTransport, "grpc"), - (transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_specialist_pool_service_client_service_account_always_use_jwt(transport_class, transport_name): - with 
mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - SpecialistPoolServiceClient, - SpecialistPoolServiceAsyncClient, -]) -def test_specialist_pool_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_specialist_pool_service_client_get_transport_class(): - transport = SpecialistPoolServiceClient.get_transport_class() - available_transports = [ - transports.SpecialistPoolServiceGrpcTransport, - ] - assert transport in available_transports - - transport = SpecialistPoolServiceClient.get_transport_class("grpc") - assert transport == transports.SpecialistPoolServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc"), - (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) 
-@mock.patch.object(SpecialistPoolServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceClient)) -@mock.patch.object(SpecialistPoolServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceAsyncClient)) -def test_specialist_pool_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(SpecialistPoolServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(SpecialistPoolServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc", "true"), - (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc", "false"), - (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(SpecialistPoolServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceClient)) -@mock.patch.object(SpecialistPoolServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SpecialistPoolServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_specialist_pool_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. 
Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc"), - (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_specialist_pool_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. 
- options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (SpecialistPoolServiceClient, transports.SpecialistPoolServiceGrpcTransport, "grpc"), - (SpecialistPoolServiceAsyncClient, transports.SpecialistPoolServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_specialist_pool_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. - options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_specialist_pool_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.specialist_pool_service.transports.SpecialistPoolServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = SpecialistPoolServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - 
scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_create_specialist_pool(transport: str = 'grpc', request_type=specialist_pool_service.CreateSpecialistPoolRequest): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.CreateSpecialistPoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_specialist_pool_from_dict(): - test_create_specialist_pool(request_type=dict) - - -def test_create_specialist_pool_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - client.create_specialist_pool() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.CreateSpecialistPoolRequest() - - -@pytest.mark.asyncio -async def test_create_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.CreateSpecialistPoolRequest): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.CreateSpecialistPoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_specialist_pool_async_from_dict(): - await test_create_specialist_pool_async(request_type=dict) - - -def test_create_specialist_pool_field_headers(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = specialist_pool_service.CreateSpecialistPoolRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_specialist_pool_field_headers_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.CreateSpecialistPoolRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_specialist_pool_flattened(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_specialist_pool( - parent='parent_value', - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].specialist_pool - mock_val = gca_specialist_pool.SpecialistPool(name='name_value') - assert arg == mock_val - - -def test_create_specialist_pool_flattened_error(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_specialist_pool( - specialist_pool_service.CreateSpecialistPoolRequest(), - parent='parent_value', - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_specialist_pool_flattened_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_specialist_pool( - parent='parent_value', - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].specialist_pool - mock_val = gca_specialist_pool.SpecialistPool(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_specialist_pool_flattened_error_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_specialist_pool( - specialist_pool_service.CreateSpecialistPoolRequest(), - parent='parent_value', - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - ) - - -def test_get_specialist_pool(transport: str = 'grpc', request_type=specialist_pool_service.GetSpecialistPoolRequest): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = specialist_pool.SpecialistPool( - name='name_value', - display_name='display_name_value', - specialist_managers_count=2662, - specialist_manager_emails=['specialist_manager_emails_value'], - pending_data_labeling_jobs=['pending_data_labeling_jobs_value'], - specialist_worker_emails=['specialist_worker_emails_value'], - ) - response = client.get_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.GetSpecialistPoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, specialist_pool.SpecialistPool) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.specialist_managers_count == 2662 - assert response.specialist_manager_emails == ['specialist_manager_emails_value'] - assert response.pending_data_labeling_jobs == ['pending_data_labeling_jobs_value'] - assert response.specialist_worker_emails == ['specialist_worker_emails_value'] - - -def test_get_specialist_pool_from_dict(): - test_get_specialist_pool(request_type=dict) - - -def test_get_specialist_pool_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_specialist_pool), - '__call__') as call: - client.get_specialist_pool() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.GetSpecialistPoolRequest() - - -@pytest.mark.asyncio -async def test_get_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.GetSpecialistPoolRequest): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool.SpecialistPool( - name='name_value', - display_name='display_name_value', - specialist_managers_count=2662, - specialist_manager_emails=['specialist_manager_emails_value'], - pending_data_labeling_jobs=['pending_data_labeling_jobs_value'], - specialist_worker_emails=['specialist_worker_emails_value'], - )) - response = await client.get_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.GetSpecialistPoolRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, specialist_pool.SpecialistPool) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.specialist_managers_count == 2662 - assert response.specialist_manager_emails == ['specialist_manager_emails_value'] - assert response.pending_data_labeling_jobs == ['pending_data_labeling_jobs_value'] - assert response.specialist_worker_emails == ['specialist_worker_emails_value'] - - -@pytest.mark.asyncio -async def test_get_specialist_pool_async_from_dict(): - await test_get_specialist_pool_async(request_type=dict) - - -def test_get_specialist_pool_field_headers(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.GetSpecialistPoolRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_specialist_pool), - '__call__') as call: - call.return_value = specialist_pool.SpecialistPool() - client.get_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_specialist_pool_field_headers_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = specialist_pool_service.GetSpecialistPoolRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_specialist_pool), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool.SpecialistPool()) - await client.get_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_specialist_pool_flattened(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = specialist_pool.SpecialistPool() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_specialist_pool( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_specialist_pool_flattened_error(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_specialist_pool( - specialist_pool_service.GetSpecialistPoolRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_specialist_pool_flattened_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = specialist_pool.SpecialistPool() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool.SpecialistPool()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_specialist_pool( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_specialist_pool_flattened_error_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_specialist_pool( - specialist_pool_service.GetSpecialistPoolRequest(), - name='name_value', - ) - - -def test_list_specialist_pools(transport: str = 'grpc', request_type=specialist_pool_service.ListSpecialistPoolsRequest): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = specialist_pool_service.ListSpecialistPoolsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_specialist_pools(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.ListSpecialistPoolsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListSpecialistPoolsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_specialist_pools_from_dict(): - test_list_specialist_pools(request_type=dict) - - -def test_list_specialist_pools_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - client.list_specialist_pools() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.ListSpecialistPoolsRequest() - - -@pytest.mark.asyncio -async def test_list_specialist_pools_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.ListSpecialistPoolsRequest): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool_service.ListSpecialistPoolsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_specialist_pools(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.ListSpecialistPoolsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListSpecialistPoolsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_specialist_pools_async_from_dict(): - await test_list_specialist_pools_async(request_type=dict) - - -def test_list_specialist_pools_field_headers(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.ListSpecialistPoolsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - call.return_value = specialist_pool_service.ListSpecialistPoolsResponse() - client.list_specialist_pools(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_specialist_pools_field_headers_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.ListSpecialistPoolsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool_service.ListSpecialistPoolsResponse()) - await client.list_specialist_pools(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_specialist_pools_flattened(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = specialist_pool_service.ListSpecialistPoolsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_specialist_pools( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_specialist_pools_flattened_error(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_specialist_pools( - specialist_pool_service.ListSpecialistPoolsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_specialist_pools_flattened_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = specialist_pool_service.ListSpecialistPoolsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(specialist_pool_service.ListSpecialistPoolsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_specialist_pools( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_specialist_pools_flattened_error_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.list_specialist_pools( - specialist_pool_service.ListSpecialistPoolsRequest(), - parent='parent_value', - ) - - -def test_list_specialist_pools_pager(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - ], - next_page_token='abc', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[], - next_page_token='def', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - ], - next_page_token='ghi', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_specialist_pools(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, specialist_pool.SpecialistPool) - for i in results) - -def test_list_specialist_pools_pages(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - ], - next_page_token='abc', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[], - next_page_token='def', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - ], - next_page_token='ghi', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - ], - ), - RuntimeError, - ) - pages = list(client.list_specialist_pools(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_specialist_pools_async_pager(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - ], - next_page_token='abc', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[], - next_page_token='def', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - ], - next_page_token='ghi', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_specialist_pools(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, specialist_pool.SpecialistPool) - for i in responses) - -@pytest.mark.asyncio -async def test_list_specialist_pools_async_pages(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_specialist_pools), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - ], - next_page_token='abc', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[], - next_page_token='def', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - ], - next_page_token='ghi', - ), - specialist_pool_service.ListSpecialistPoolsResponse( - specialist_pools=[ - specialist_pool.SpecialistPool(), - specialist_pool.SpecialistPool(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_specialist_pools(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_specialist_pool(transport: str = 'grpc', request_type=specialist_pool_service.DeleteSpecialistPoolRequest): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.DeleteSpecialistPoolRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_delete_specialist_pool_from_dict(): - test_delete_specialist_pool(request_type=dict) - - -def test_delete_specialist_pool_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_specialist_pool), - '__call__') as call: - client.delete_specialist_pool() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.DeleteSpecialistPoolRequest() - - -@pytest.mark.asyncio -async def test_delete_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.DeleteSpecialistPoolRequest): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.DeleteSpecialistPoolRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_specialist_pool_async_from_dict(): - await test_delete_specialist_pool_async(request_type=dict) - - -def test_delete_specialist_pool_field_headers(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.DeleteSpecialistPoolRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_specialist_pool), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_specialist_pool_field_headers_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.DeleteSpecialistPoolRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_specialist_pool), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_specialist_pool_flattened(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_specialist_pool( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_specialist_pool_flattened_error(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_specialist_pool( - specialist_pool_service.DeleteSpecialistPoolRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_specialist_pool_flattened_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_specialist_pool( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_specialist_pool_flattened_error_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_specialist_pool( - specialist_pool_service.DeleteSpecialistPoolRequest(), - name='name_value', - ) - - -def test_update_specialist_pool(transport: str = 'grpc', request_type=specialist_pool_service.UpdateSpecialistPoolRequest): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.update_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.UpdateSpecialistPoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_update_specialist_pool_from_dict(): - test_update_specialist_pool(request_type=dict) - - -def test_update_specialist_pool_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - client.update_specialist_pool() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.UpdateSpecialistPoolRequest() - - -@pytest.mark.asyncio -async def test_update_specialist_pool_async(transport: str = 'grpc_asyncio', request_type=specialist_pool_service.UpdateSpecialistPoolRequest): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.update_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == specialist_pool_service.UpdateSpecialistPoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_update_specialist_pool_async_from_dict(): - await test_update_specialist_pool_async(request_type=dict) - - -def test_update_specialist_pool_field_headers(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.UpdateSpecialistPoolRequest() - - request.specialist_pool.name = 'specialist_pool.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.update_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'specialist_pool.name=specialist_pool.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_specialist_pool_field_headers_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = specialist_pool_service.UpdateSpecialistPoolRequest() - - request.specialist_pool.name = 'specialist_pool.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.update_specialist_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'specialist_pool.name=specialist_pool.name/value', - ) in kw['metadata'] - - -def test_update_specialist_pool_flattened(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_specialist_pool( - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].specialist_pool - mock_val = gca_specialist_pool.SpecialistPool(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_specialist_pool_flattened_error(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_specialist_pool( - specialist_pool_service.UpdateSpecialistPoolRequest(), - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_specialist_pool_flattened_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_specialist_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_specialist_pool( - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].specialist_pool - mock_val = gca_specialist_pool.SpecialistPool(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_specialist_pool_flattened_error_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.update_specialist_pool( - specialist_pool_service.UpdateSpecialistPoolRequest(), - specialist_pool=gca_specialist_pool.SpecialistPool(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.SpecialistPoolServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.SpecialistPoolServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = SpecialistPoolServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.SpecialistPoolServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = SpecialistPoolServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.SpecialistPoolServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = SpecialistPoolServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. 
- transport = transports.SpecialistPoolServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.SpecialistPoolServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.SpecialistPoolServiceGrpcTransport, - transports.SpecialistPoolServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.SpecialistPoolServiceGrpcTransport, - ) - -def test_specialist_pool_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.SpecialistPoolServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_specialist_pool_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1beta1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.SpecialistPoolServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
- methods = ( - 'create_specialist_pool', - 'get_specialist_pool', - 'list_specialist_pools', - 'delete_specialist_pool', - 'update_specialist_pool', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -def test_specialist_pool_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.SpecialistPoolServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_specialist_pool_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.specialist_pool_service.transports.SpecialistPoolServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.SpecialistPoolServiceTransport() - adc.assert_called_once() - - -def test_specialist_pool_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - SpecialistPoolServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.SpecialistPoolServiceGrpcTransport, - transports.SpecialistPoolServiceGrpcAsyncIOTransport, - ], -) -def test_specialist_pool_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.SpecialistPoolServiceGrpcTransport, grpc_helpers), - (transports.SpecialistPoolServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_specialist_pool_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.SpecialistPoolServiceGrpcTransport, transports.SpecialistPoolServiceGrpcAsyncIOTransport]) -def test_specialist_pool_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. 
- with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_specialist_pool_service_host_no_port(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_specialist_pool_service_host_with_port(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_specialist_pool_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.SpecialistPoolServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_specialist_pool_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.SpecialistPoolServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.SpecialistPoolServiceGrpcTransport, transports.SpecialistPoolServiceGrpcAsyncIOTransport]) -def test_specialist_pool_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) 
are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.SpecialistPoolServiceGrpcTransport, transports.SpecialistPoolServiceGrpcAsyncIOTransport]) -def test_specialist_pool_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_specialist_pool_service_grpc_lro_client(): - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. 
- assert transport.operations_client is transport.operations_client - - -def test_specialist_pool_service_grpc_lro_async_client(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_specialist_pool_path(): - project = "squid" - location = "clam" - specialist_pool = "whelk" - expected = "projects/{project}/locations/{location}/specialistPools/{specialist_pool}".format(project=project, location=location, specialist_pool=specialist_pool, ) - actual = SpecialistPoolServiceClient.specialist_pool_path(project, location, specialist_pool) - assert expected == actual - - -def test_parse_specialist_pool_path(): - expected = { - "project": "octopus", - "location": "oyster", - "specialist_pool": "nudibranch", - } - path = SpecialistPoolServiceClient.specialist_pool_path(**expected) - - # Check that the path construction is reversible. - actual = SpecialistPoolServiceClient.parse_specialist_pool_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "cuttlefish" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = SpecialistPoolServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "mussel", - } - path = SpecialistPoolServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. 
- actual = SpecialistPoolServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "winkle" - expected = "folders/{folder}".format(folder=folder, ) - actual = SpecialistPoolServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "nautilus", - } - path = SpecialistPoolServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = SpecialistPoolServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "scallop" - expected = "organizations/{organization}".format(organization=organization, ) - actual = SpecialistPoolServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "abalone", - } - path = SpecialistPoolServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = SpecialistPoolServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "squid" - expected = "projects/{project}".format(project=project, ) - actual = SpecialistPoolServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "clam", - } - path = SpecialistPoolServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. 
- actual = SpecialistPoolServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "whelk" - location = "octopus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = SpecialistPoolServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - } - path = SpecialistPoolServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. - actual = SpecialistPoolServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.SpecialistPoolServiceTransport, '_prep_wrapped_messages') as prep: - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.SpecialistPoolServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = SpecialistPoolServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = SpecialistPoolServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = SpecialistPoolServiceClient( - 
credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = SpecialistPoolServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. - with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py deleted file mode 100644 index 857f56c888..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_tensorboard_service.py +++ /dev/null @@ -1,8862 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.tensorboard_service import TensorboardServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.tensorboard_service import TensorboardServiceClient -from google.cloud.aiplatform_v1beta1.services.tensorboard_service import pagers -from google.cloud.aiplatform_v1beta1.services.tensorboard_service import transports -from google.cloud.aiplatform_v1beta1.types import encryption_spec -from google.cloud.aiplatform_v1beta1.types import operation as gca_operation -from google.cloud.aiplatform_v1beta1.types import tensorboard -from google.cloud.aiplatform_v1beta1.types import tensorboard as gca_tensorboard -from google.cloud.aiplatform_v1beta1.types import tensorboard_data -from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment -from google.cloud.aiplatform_v1beta1.types import tensorboard_experiment as gca_tensorboard_experiment -from google.cloud.aiplatform_v1beta1.types import tensorboard_run -from google.cloud.aiplatform_v1beta1.types import tensorboard_run as gca_tensorboard_run -from google.cloud.aiplatform_v1beta1.types import tensorboard_service -from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series -from google.cloud.aiplatform_v1beta1.types import tensorboard_time_series as 
gca_tensorboard_time_series -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert TensorboardServiceClient._get_default_mtls_endpoint(None) is None - assert TensorboardServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert TensorboardServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert TensorboardServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert TensorboardServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert TensorboardServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - TensorboardServiceClient, - TensorboardServiceAsyncClient, -]) -def test_tensorboard_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = 
client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.TensorboardServiceGrpcTransport, "grpc"), - (transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_tensorboard_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - TensorboardServiceClient, - TensorboardServiceAsyncClient, -]) -def test_tensorboard_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_tensorboard_service_client_get_transport_class(): - transport = TensorboardServiceClient.get_transport_class() - available_transports = [ - 
transports.TensorboardServiceGrpcTransport, - ] - assert transport in available_transports - - transport = TensorboardServiceClient.get_transport_class("grpc") - assert transport == transports.TensorboardServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc"), - (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(TensorboardServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceClient)) -@mock.patch.object(TensorboardServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceAsyncClient)) -def test_tensorboard_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(TensorboardServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(TensorboardServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. 
- options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc", "true"), - (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc", "false"), - (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(TensorboardServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceClient)) -@mock.patch.object(TensorboardServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TensorboardServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_tensorboard_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. 
Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc"), - (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_tensorboard_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (TensorboardServiceClient, transports.TensorboardServiceGrpcTransport, "grpc"), - (TensorboardServiceAsyncClient, transports.TensorboardServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_tensorboard_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_tensorboard_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.tensorboard_service.transports.TensorboardServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = TensorboardServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_create_tensorboard(transport: str = 'grpc', request_type=tensorboard_service.CreateTensorboardRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_tensorboard_from_dict(): - test_create_tensorboard(request_type=dict) - - -def test_create_tensorboard_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard), - '__call__') as call: - client.create_tensorboard() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardRequest() - - -@pytest.mark.asyncio -async def test_create_tensorboard_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.CreateTensorboardRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_tensorboard_async_from_dict(): - await test_create_tensorboard_async(request_type=dict) - - -def test_create_tensorboard_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.CreateTensorboardRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_tensorboard_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.CreateTensorboardRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_tensorboard), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_tensorboard_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_tensorboard( - parent='parent_value', - tensorboard=gca_tensorboard.Tensorboard(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].tensorboard - mock_val = gca_tensorboard.Tensorboard(name='name_value') - assert arg == mock_val - - -def test_create_tensorboard_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_tensorboard( - tensorboard_service.CreateTensorboardRequest(), - parent='parent_value', - tensorboard=gca_tensorboard.Tensorboard(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_tensorboard_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_tensorboard( - parent='parent_value', - tensorboard=gca_tensorboard.Tensorboard(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].tensorboard - mock_val = gca_tensorboard.Tensorboard(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_tensorboard_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_tensorboard( - tensorboard_service.CreateTensorboardRequest(), - parent='parent_value', - tensorboard=gca_tensorboard.Tensorboard(name='name_value'), - ) - - -def test_get_tensorboard(transport: str = 'grpc', request_type=tensorboard_service.GetTensorboardRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard.Tensorboard( - name='name_value', - display_name='display_name_value', - description='description_value', - blob_storage_path_prefix='blob_storage_path_prefix_value', - run_count=989, - etag='etag_value', - ) - response = client.get_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard.Tensorboard) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.blob_storage_path_prefix == 'blob_storage_path_prefix_value' - assert response.run_count == 989 - assert response.etag == 'etag_value' - - -def test_get_tensorboard_from_dict(): - test_get_tensorboard(request_type=dict) - - -def test_get_tensorboard_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard), - '__call__') as call: - client.get_tensorboard() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardRequest() - - -@pytest.mark.asyncio -async def test_get_tensorboard_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.GetTensorboardRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard.Tensorboard( - name='name_value', - display_name='display_name_value', - description='description_value', - blob_storage_path_prefix='blob_storage_path_prefix_value', - run_count=989, - etag='etag_value', - )) - response = await client.get_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, tensorboard.Tensorboard) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.blob_storage_path_prefix == 'blob_storage_path_prefix_value' - assert response.run_count == 989 - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_tensorboard_async_from_dict(): - await test_get_tensorboard_async(request_type=dict) - - -def test_get_tensorboard_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.GetTensorboardRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard), - '__call__') as call: - call.return_value = tensorboard.Tensorboard() - client.get_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_tensorboard_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.GetTensorboardRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_tensorboard), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard.Tensorboard()) - await client.get_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_tensorboard_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard.Tensorboard() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_tensorboard( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_tensorboard_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_tensorboard( - tensorboard_service.GetTensorboardRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_tensorboard_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard.Tensorboard() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard.Tensorboard()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_tensorboard( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_tensorboard_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_tensorboard( - tensorboard_service.GetTensorboardRequest(), - name='name_value', - ) - - -def test_update_tensorboard(transport: str = 'grpc', request_type=tensorboard_service.UpdateTensorboardRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.update_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_update_tensorboard_from_dict(): - test_update_tensorboard(request_type=dict) - - -def test_update_tensorboard_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard), - '__call__') as call: - client.update_tensorboard() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardRequest() - - -@pytest.mark.asyncio -async def test_update_tensorboard_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.UpdateTensorboardRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.update_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_update_tensorboard_async_from_dict(): - await test_update_tensorboard_async(request_type=dict) - - -def test_update_tensorboard_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.UpdateTensorboardRequest() - - request.tensorboard.name = 'tensorboard.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.update_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard.name=tensorboard.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_tensorboard_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.UpdateTensorboardRequest() - - request.tensorboard.name = 'tensorboard.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_tensorboard), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.update_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard.name=tensorboard.name/value', - ) in kw['metadata'] - - -def test_update_tensorboard_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_tensorboard( - tensorboard=gca_tensorboard.Tensorboard(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard - mock_val = gca_tensorboard.Tensorboard(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_tensorboard_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_tensorboard( - tensorboard_service.UpdateTensorboardRequest(), - tensorboard=gca_tensorboard.Tensorboard(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_tensorboard_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_tensorboard( - tensorboard=gca_tensorboard.Tensorboard(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard - mock_val = gca_tensorboard.Tensorboard(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_tensorboard_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.update_tensorboard( - tensorboard_service.UpdateTensorboardRequest(), - tensorboard=gca_tensorboard.Tensorboard(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_list_tensorboards(transport: str = 'grpc', request_type=tensorboard_service.ListTensorboardsRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboards), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ListTensorboardsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_tensorboards(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ListTensorboardsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTensorboardsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_tensorboards_from_dict(): - test_list_tensorboards(request_type=dict) - - -def test_list_tensorboards_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_tensorboards), - '__call__') as call: - client.list_tensorboards() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ListTensorboardsRequest() - - -@pytest.mark.asyncio -async def test_list_tensorboards_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ListTensorboardsRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboards), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_tensorboards(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ListTensorboardsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTensorboardsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_tensorboards_async_from_dict(): - await test_list_tensorboards_async(request_type=dict) - - -def test_list_tensorboards_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = tensorboard_service.ListTensorboardsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboards), - '__call__') as call: - call.return_value = tensorboard_service.ListTensorboardsResponse() - client.list_tensorboards(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_tensorboards_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ListTensorboardsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboards), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardsResponse()) - await client.list_tensorboards(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_tensorboards_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_tensorboards), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ListTensorboardsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_tensorboards( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_tensorboards_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_tensorboards( - tensorboard_service.ListTensorboardsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_tensorboards_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboards), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ListTensorboardsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_tensorboards( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_tensorboards_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_tensorboards( - tensorboard_service.ListTensorboardsRequest(), - parent='parent_value', - ) - - -def test_list_tensorboards_pager(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboards), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - tensorboard_service.ListTensorboardsResponse( - tensorboards=[ - tensorboard.Tensorboard(), - tensorboard.Tensorboard(), - tensorboard.Tensorboard(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardsResponse( - tensorboards=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardsResponse( - tensorboards=[ - tensorboard.Tensorboard(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardsResponse( - tensorboards=[ - tensorboard.Tensorboard(), - tensorboard.Tensorboard(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_tensorboards(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, tensorboard.Tensorboard) - for i in results) - -def test_list_tensorboards_pages(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call 
within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboards), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - tensorboard_service.ListTensorboardsResponse( - tensorboards=[ - tensorboard.Tensorboard(), - tensorboard.Tensorboard(), - tensorboard.Tensorboard(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardsResponse( - tensorboards=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardsResponse( - tensorboards=[ - tensorboard.Tensorboard(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardsResponse( - tensorboards=[ - tensorboard.Tensorboard(), - tensorboard.Tensorboard(), - ], - ), - RuntimeError, - ) - pages = list(client.list_tensorboards(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_tensorboards_async_pager(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboards), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - tensorboard_service.ListTensorboardsResponse( - tensorboards=[ - tensorboard.Tensorboard(), - tensorboard.Tensorboard(), - tensorboard.Tensorboard(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardsResponse( - tensorboards=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardsResponse( - tensorboards=[ - tensorboard.Tensorboard(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardsResponse( - tensorboards=[ - tensorboard.Tensorboard(), - tensorboard.Tensorboard(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_tensorboards(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, tensorboard.Tensorboard) - for i in responses) - -@pytest.mark.asyncio -async def test_list_tensorboards_async_pages(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboards), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - tensorboard_service.ListTensorboardsResponse( - tensorboards=[ - tensorboard.Tensorboard(), - tensorboard.Tensorboard(), - tensorboard.Tensorboard(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardsResponse( - tensorboards=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardsResponse( - tensorboards=[ - tensorboard.Tensorboard(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardsResponse( - tensorboards=[ - tensorboard.Tensorboard(), - tensorboard.Tensorboard(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_tensorboards(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_tensorboard(transport: str = 'grpc', request_type=tensorboard_service.DeleteTensorboardRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_delete_tensorboard_from_dict(): - test_delete_tensorboard(request_type=dict) - - -def test_delete_tensorboard_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard), - '__call__') as call: - client.delete_tensorboard() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardRequest() - - -@pytest.mark.asyncio -async def test_delete_tensorboard_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.DeleteTensorboardRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_tensorboard_async_from_dict(): - await test_delete_tensorboard_async(request_type=dict) - - -def test_delete_tensorboard_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.DeleteTensorboardRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_tensorboard_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.DeleteTensorboardRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_tensorboard(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_tensorboard_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_tensorboard( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_tensorboard_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_tensorboard( - tensorboard_service.DeleteTensorboardRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_tensorboard_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_tensorboard( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_tensorboard_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_tensorboard( - tensorboard_service.DeleteTensorboardRequest(), - name='name_value', - ) - - -def test_create_tensorboard_experiment(transport: str = 'grpc', request_type=tensorboard_service.CreateTensorboardExperimentRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = gca_tensorboard_experiment.TensorboardExperiment( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - source='source_value', - ) - response = client.create_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardExperimentRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.source == 'source_value' - - -def test_create_tensorboard_experiment_from_dict(): - test_create_tensorboard_experiment(request_type=dict) - - -def test_create_tensorboard_experiment_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_tensorboard_experiment), - '__call__') as call: - client.create_tensorboard_experiment() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardExperimentRequest() - - -@pytest.mark.asyncio -async def test_create_tensorboard_experiment_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.CreateTensorboardExperimentRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - source='source_value', - )) - response = await client.create_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardExperimentRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.source == 'source_value' - - -@pytest.mark.asyncio -async def test_create_tensorboard_experiment_async_from_dict(): - await test_create_tensorboard_experiment_async(request_type=dict) - - -def test_create_tensorboard_experiment_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.CreateTensorboardExperimentRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_experiment), - '__call__') as call: - call.return_value = gca_tensorboard_experiment.TensorboardExperiment() - client.create_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_tensorboard_experiment_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.CreateTensorboardExperimentRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_tensorboard_experiment), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment()) - await client.create_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_tensorboard_experiment_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_experiment.TensorboardExperiment() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_tensorboard_experiment( - parent='parent_value', - tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), - tensorboard_experiment_id='tensorboard_experiment_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].tensorboard_experiment - mock_val = gca_tensorboard_experiment.TensorboardExperiment(name='name_value') - assert arg == mock_val - arg = args[0].tensorboard_experiment_id - mock_val = 'tensorboard_experiment_id_value' - assert arg == mock_val - - -def test_create_tensorboard_experiment_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_tensorboard_experiment( - tensorboard_service.CreateTensorboardExperimentRequest(), - parent='parent_value', - tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), - tensorboard_experiment_id='tensorboard_experiment_id_value', - ) - - -@pytest.mark.asyncio -async def test_create_tensorboard_experiment_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_experiment.TensorboardExperiment() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.create_tensorboard_experiment( - parent='parent_value', - tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), - tensorboard_experiment_id='tensorboard_experiment_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].tensorboard_experiment - mock_val = gca_tensorboard_experiment.TensorboardExperiment(name='name_value') - assert arg == mock_val - arg = args[0].tensorboard_experiment_id - mock_val = 'tensorboard_experiment_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_tensorboard_experiment_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_tensorboard_experiment( - tensorboard_service.CreateTensorboardExperimentRequest(), - parent='parent_value', - tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), - tensorboard_experiment_id='tensorboard_experiment_id_value', - ) - - -def test_get_tensorboard_experiment(transport: str = 'grpc', request_type=tensorboard_service.GetTensorboardExperimentRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_experiment.TensorboardExperiment( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - source='source_value', - ) - response = client.get_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardExperimentRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_experiment.TensorboardExperiment) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.source == 'source_value' - - -def test_get_tensorboard_experiment_from_dict(): - test_get_tensorboard_experiment(request_type=dict) - - -def test_get_tensorboard_experiment_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_tensorboard_experiment), - '__call__') as call: - client.get_tensorboard_experiment() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardExperimentRequest() - - -@pytest.mark.asyncio -async def test_get_tensorboard_experiment_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.GetTensorboardExperimentRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_experiment.TensorboardExperiment( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - source='source_value', - )) - response = await client.get_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardExperimentRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, tensorboard_experiment.TensorboardExperiment) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.source == 'source_value' - - -@pytest.mark.asyncio -async def test_get_tensorboard_experiment_async_from_dict(): - await test_get_tensorboard_experiment_async(request_type=dict) - - -def test_get_tensorboard_experiment_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.GetTensorboardExperimentRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_experiment), - '__call__') as call: - call.return_value = tensorboard_experiment.TensorboardExperiment() - client.get_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_tensorboard_experiment_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.GetTensorboardExperimentRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_tensorboard_experiment), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_experiment.TensorboardExperiment()) - await client.get_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_tensorboard_experiment_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_experiment.TensorboardExperiment() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_tensorboard_experiment( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_tensorboard_experiment_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_tensorboard_experiment( - tensorboard_service.GetTensorboardExperimentRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_tensorboard_experiment_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_experiment.TensorboardExperiment() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_experiment.TensorboardExperiment()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_tensorboard_experiment( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_tensorboard_experiment_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_tensorboard_experiment( - tensorboard_service.GetTensorboardExperimentRequest(), - name='name_value', - ) - - -def test_update_tensorboard_experiment(transport: str = 'grpc', request_type=tensorboard_service.UpdateTensorboardExperimentRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_experiment.TensorboardExperiment( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - source='source_value', - ) - response = client.update_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardExperimentRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.source == 'source_value' - - -def test_update_tensorboard_experiment_from_dict(): - test_update_tensorboard_experiment(request_type=dict) - - -def test_update_tensorboard_experiment_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_experiment), - '__call__') as call: - client.update_tensorboard_experiment() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardExperimentRequest() - - -@pytest.mark.asyncio -async def test_update_tensorboard_experiment_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.UpdateTensorboardExperimentRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - source='source_value', - )) - response = await client.update_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardExperimentRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_tensorboard_experiment.TensorboardExperiment) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - assert response.source == 'source_value' - - -@pytest.mark.asyncio -async def test_update_tensorboard_experiment_async_from_dict(): - await test_update_tensorboard_experiment_async(request_type=dict) - - -def test_update_tensorboard_experiment_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.UpdateTensorboardExperimentRequest() - - request.tensorboard_experiment.name = 'tensorboard_experiment.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_experiment), - '__call__') as call: - call.return_value = gca_tensorboard_experiment.TensorboardExperiment() - client.update_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_experiment.name=tensorboard_experiment.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_tensorboard_experiment_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = tensorboard_service.UpdateTensorboardExperimentRequest() - - request.tensorboard_experiment.name = 'tensorboard_experiment.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_experiment), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment()) - await client.update_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_experiment.name=tensorboard_experiment.name/value', - ) in kw['metadata'] - - -def test_update_tensorboard_experiment_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_experiment.TensorboardExperiment() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_tensorboard_experiment( - tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_experiment - mock_val = gca_tensorboard_experiment.TensorboardExperiment(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_tensorboard_experiment_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_tensorboard_experiment( - tensorboard_service.UpdateTensorboardExperimentRequest(), - tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_tensorboard_experiment_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_experiment.TensorboardExperiment() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_experiment.TensorboardExperiment()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_tensorboard_experiment( - tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_experiment - mock_val = gca_tensorboard_experiment.TensorboardExperiment(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_tensorboard_experiment_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_tensorboard_experiment( - tensorboard_service.UpdateTensorboardExperimentRequest(), - tensorboard_experiment=gca_tensorboard_experiment.TensorboardExperiment(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_list_tensorboard_experiments(transport: str = 'grpc', request_type=tensorboard_service.ListTensorboardExperimentsRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_experiments), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ListTensorboardExperimentsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_tensorboard_experiments(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ListTensorboardExperimentsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTensorboardExperimentsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_tensorboard_experiments_from_dict(): - test_list_tensorboard_experiments(request_type=dict) - - -def test_list_tensorboard_experiments_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_experiments), - '__call__') as call: - client.list_tensorboard_experiments() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ListTensorboardExperimentsRequest() - - -@pytest.mark.asyncio -async def test_list_tensorboard_experiments_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ListTensorboardExperimentsRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_experiments), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardExperimentsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_tensorboard_experiments(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ListTensorboardExperimentsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTensorboardExperimentsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_tensorboard_experiments_async_from_dict(): - await test_list_tensorboard_experiments_async(request_type=dict) - - -def test_list_tensorboard_experiments_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ListTensorboardExperimentsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_experiments), - '__call__') as call: - call.return_value = tensorboard_service.ListTensorboardExperimentsResponse() - client.list_tensorboard_experiments(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_tensorboard_experiments_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ListTensorboardExperimentsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_experiments), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardExperimentsResponse()) - await client.list_tensorboard_experiments(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_tensorboard_experiments_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_experiments), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ListTensorboardExperimentsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_tensorboard_experiments( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_tensorboard_experiments_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_tensorboard_experiments( - tensorboard_service.ListTensorboardExperimentsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_tensorboard_experiments_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_experiments), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ListTensorboardExperimentsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardExperimentsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_tensorboard_experiments( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_tensorboard_experiments_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.list_tensorboard_experiments( - tensorboard_service.ListTensorboardExperimentsRequest(), - parent='parent_value', - ) - - -def test_list_tensorboard_experiments_pager(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_experiments), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[ - tensorboard_experiment.TensorboardExperiment(), - tensorboard_experiment.TensorboardExperiment(), - tensorboard_experiment.TensorboardExperiment(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[ - tensorboard_experiment.TensorboardExperiment(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[ - tensorboard_experiment.TensorboardExperiment(), - tensorboard_experiment.TensorboardExperiment(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_tensorboard_experiments(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, tensorboard_experiment.TensorboardExperiment) - for i in results) - -def test_list_tensorboard_experiments_pages(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_tensorboard_experiments), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[ - tensorboard_experiment.TensorboardExperiment(), - tensorboard_experiment.TensorboardExperiment(), - tensorboard_experiment.TensorboardExperiment(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[ - tensorboard_experiment.TensorboardExperiment(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[ - tensorboard_experiment.TensorboardExperiment(), - tensorboard_experiment.TensorboardExperiment(), - ], - ), - RuntimeError, - ) - pages = list(client.list_tensorboard_experiments(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_tensorboard_experiments_async_pager(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_experiments), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[ - tensorboard_experiment.TensorboardExperiment(), - tensorboard_experiment.TensorboardExperiment(), - tensorboard_experiment.TensorboardExperiment(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[ - tensorboard_experiment.TensorboardExperiment(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[ - tensorboard_experiment.TensorboardExperiment(), - tensorboard_experiment.TensorboardExperiment(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_tensorboard_experiments(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, tensorboard_experiment.TensorboardExperiment) - for i in responses) - -@pytest.mark.asyncio -async def test_list_tensorboard_experiments_async_pages(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_experiments), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[ - tensorboard_experiment.TensorboardExperiment(), - tensorboard_experiment.TensorboardExperiment(), - tensorboard_experiment.TensorboardExperiment(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[ - tensorboard_experiment.TensorboardExperiment(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardExperimentsResponse( - tensorboard_experiments=[ - tensorboard_experiment.TensorboardExperiment(), - tensorboard_experiment.TensorboardExperiment(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_tensorboard_experiments(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_tensorboard_experiment(transport: str = 'grpc', request_type=tensorboard_service.DeleteTensorboardExperimentRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardExperimentRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_tensorboard_experiment_from_dict(): - test_delete_tensorboard_experiment(request_type=dict) - - -def test_delete_tensorboard_experiment_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_experiment), - '__call__') as call: - client.delete_tensorboard_experiment() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardExperimentRequest() - - -@pytest.mark.asyncio -async def test_delete_tensorboard_experiment_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.DeleteTensorboardExperimentRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardExperimentRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_tensorboard_experiment_async_from_dict(): - await test_delete_tensorboard_experiment_async(request_type=dict) - - -def test_delete_tensorboard_experiment_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.DeleteTensorboardExperimentRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_experiment), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_tensorboard_experiment_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.DeleteTensorboardExperimentRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_tensorboard_experiment), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_tensorboard_experiment(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_tensorboard_experiment_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_tensorboard_experiment( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_tensorboard_experiment_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.delete_tensorboard_experiment( - tensorboard_service.DeleteTensorboardExperimentRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_tensorboard_experiment_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_experiment), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_tensorboard_experiment( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_tensorboard_experiment_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.delete_tensorboard_experiment( - tensorboard_service.DeleteTensorboardExperimentRequest(), - name='name_value', - ) - - -def test_create_tensorboard_run(transport: str = 'grpc', request_type=tensorboard_service.CreateTensorboardRunRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_run.TensorboardRun( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - ) - response = client.create_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardRunRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_tensorboard_run.TensorboardRun) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - - -def test_create_tensorboard_run_from_dict(): - test_create_tensorboard_run(request_type=dict) - - -def test_create_tensorboard_run_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_run), - '__call__') as call: - client.create_tensorboard_run() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardRunRequest() - - -@pytest.mark.asyncio -async def test_create_tensorboard_run_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.CreateTensorboardRunRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - )) - response = await client.create_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardRunRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_tensorboard_run.TensorboardRun) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_create_tensorboard_run_async_from_dict(): - await test_create_tensorboard_run_async(request_type=dict) - - -def test_create_tensorboard_run_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.CreateTensorboardRunRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_run), - '__call__') as call: - call.return_value = gca_tensorboard_run.TensorboardRun() - client.create_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_tensorboard_run_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.CreateTensorboardRunRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_tensorboard_run), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun()) - await client.create_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_tensorboard_run_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_run.TensorboardRun() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_tensorboard_run( - parent='parent_value', - tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), - tensorboard_run_id='tensorboard_run_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].tensorboard_run - mock_val = gca_tensorboard_run.TensorboardRun(name='name_value') - assert arg == mock_val - arg = args[0].tensorboard_run_id - mock_val = 'tensorboard_run_id_value' - assert arg == mock_val - - -def test_create_tensorboard_run_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_tensorboard_run( - tensorboard_service.CreateTensorboardRunRequest(), - parent='parent_value', - tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), - tensorboard_run_id='tensorboard_run_id_value', - ) - - -@pytest.mark.asyncio -async def test_create_tensorboard_run_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_run.TensorboardRun() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_tensorboard_run( - parent='parent_value', - tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), - tensorboard_run_id='tensorboard_run_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].tensorboard_run - mock_val = gca_tensorboard_run.TensorboardRun(name='name_value') - assert arg == mock_val - arg = args[0].tensorboard_run_id - mock_val = 'tensorboard_run_id_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_tensorboard_run_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_tensorboard_run( - tensorboard_service.CreateTensorboardRunRequest(), - parent='parent_value', - tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), - tensorboard_run_id='tensorboard_run_id_value', - ) - - -def test_batch_create_tensorboard_runs(transport: str = 'grpc', request_type=tensorboard_service.BatchCreateTensorboardRunsRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_runs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.BatchCreateTensorboardRunsResponse( - ) - response = client.batch_create_tensorboard_runs(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.BatchCreateTensorboardRunsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_service.BatchCreateTensorboardRunsResponse) - - -def test_batch_create_tensorboard_runs_from_dict(): - test_batch_create_tensorboard_runs(request_type=dict) - - -def test_batch_create_tensorboard_runs_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_runs), - '__call__') as call: - client.batch_create_tensorboard_runs() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.BatchCreateTensorboardRunsRequest() - - -@pytest.mark.asyncio -async def test_batch_create_tensorboard_runs_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.BatchCreateTensorboardRunsRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_runs), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchCreateTensorboardRunsResponse( - )) - response = await client.batch_create_tensorboard_runs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.BatchCreateTensorboardRunsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_service.BatchCreateTensorboardRunsResponse) - - -@pytest.mark.asyncio -async def test_batch_create_tensorboard_runs_async_from_dict(): - await test_batch_create_tensorboard_runs_async(request_type=dict) - - -def test_batch_create_tensorboard_runs_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.BatchCreateTensorboardRunsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_runs), - '__call__') as call: - call.return_value = tensorboard_service.BatchCreateTensorboardRunsResponse() - client.batch_create_tensorboard_runs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_batch_create_tensorboard_runs_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. 
Set these to a non-empty value. - request = tensorboard_service.BatchCreateTensorboardRunsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_runs), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchCreateTensorboardRunsResponse()) - await client.batch_create_tensorboard_runs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_batch_create_tensorboard_runs_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_runs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.BatchCreateTensorboardRunsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.batch_create_tensorboard_runs( - parent='parent_value', - requests=[tensorboard_service.CreateTensorboardRunRequest(parent='parent_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].requests - mock_val = [tensorboard_service.CreateTensorboardRunRequest(parent='parent_value')] - assert arg == mock_val - - -def test_batch_create_tensorboard_runs_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.batch_create_tensorboard_runs( - tensorboard_service.BatchCreateTensorboardRunsRequest(), - parent='parent_value', - requests=[tensorboard_service.CreateTensorboardRunRequest(parent='parent_value')], - ) - - -@pytest.mark.asyncio -async def test_batch_create_tensorboard_runs_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_runs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.BatchCreateTensorboardRunsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchCreateTensorboardRunsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.batch_create_tensorboard_runs( - parent='parent_value', - requests=[tensorboard_service.CreateTensorboardRunRequest(parent='parent_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].requests - mock_val = [tensorboard_service.CreateTensorboardRunRequest(parent='parent_value')] - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_batch_create_tensorboard_runs_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.batch_create_tensorboard_runs( - tensorboard_service.BatchCreateTensorboardRunsRequest(), - parent='parent_value', - requests=[tensorboard_service.CreateTensorboardRunRequest(parent='parent_value')], - ) - - -def test_get_tensorboard_run(transport: str = 'grpc', request_type=tensorboard_service.GetTensorboardRunRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_run.TensorboardRun( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - ) - response = client.get_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardRunRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, tensorboard_run.TensorboardRun) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - - -def test_get_tensorboard_run_from_dict(): - test_get_tensorboard_run(request_type=dict) - - -def test_get_tensorboard_run_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_run), - '__call__') as call: - client.get_tensorboard_run() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardRunRequest() - - -@pytest.mark.asyncio -async def test_get_tensorboard_run_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.GetTensorboardRunRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_run.TensorboardRun( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - )) - response = await client.get_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardRunRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_run.TensorboardRun) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_get_tensorboard_run_async_from_dict(): - await test_get_tensorboard_run_async(request_type=dict) - - -def test_get_tensorboard_run_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.GetTensorboardRunRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_run), - '__call__') as call: - call.return_value = tensorboard_run.TensorboardRun() - client.get_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_tensorboard_run_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.GetTensorboardRunRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_tensorboard_run), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_run.TensorboardRun()) - await client.get_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_tensorboard_run_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_run.TensorboardRun() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_tensorboard_run( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_tensorboard_run_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_tensorboard_run( - tensorboard_service.GetTensorboardRunRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_tensorboard_run_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_run.TensorboardRun() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_run.TensorboardRun()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_tensorboard_run( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_tensorboard_run_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_tensorboard_run( - tensorboard_service.GetTensorboardRunRequest(), - name='name_value', - ) - - -def test_update_tensorboard_run(transport: str = 'grpc', request_type=tensorboard_service.UpdateTensorboardRunRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_run.TensorboardRun( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - ) - response = client.update_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardRunRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_tensorboard_run.TensorboardRun) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - - -def test_update_tensorboard_run_from_dict(): - test_update_tensorboard_run(request_type=dict) - - -def test_update_tensorboard_run_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_tensorboard_run), - '__call__') as call: - client.update_tensorboard_run() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardRunRequest() - - -@pytest.mark.asyncio -async def test_update_tensorboard_run_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.UpdateTensorboardRunRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun( - name='name_value', - display_name='display_name_value', - description='description_value', - etag='etag_value', - )) - response = await client.update_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardRunRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_tensorboard_run.TensorboardRun) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.etag == 'etag_value' - - -@pytest.mark.asyncio -async def test_update_tensorboard_run_async_from_dict(): - await test_update_tensorboard_run_async(request_type=dict) - - -def test_update_tensorboard_run_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.UpdateTensorboardRunRequest() - - request.tensorboard_run.name = 'tensorboard_run.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_run), - '__call__') as call: - call.return_value = gca_tensorboard_run.TensorboardRun() - client.update_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_run.name=tensorboard_run.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_tensorboard_run_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.UpdateTensorboardRunRequest() - - request.tensorboard_run.name = 'tensorboard_run.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_tensorboard_run), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun()) - await client.update_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_run.name=tensorboard_run.name/value', - ) in kw['metadata'] - - -def test_update_tensorboard_run_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_run.TensorboardRun() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_tensorboard_run( - tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_run - mock_val = gca_tensorboard_run.TensorboardRun(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_tensorboard_run_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_tensorboard_run( - tensorboard_service.UpdateTensorboardRunRequest(), - tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_tensorboard_run_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_run.TensorboardRun() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_run.TensorboardRun()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_tensorboard_run( - tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_run - mock_val = gca_tensorboard_run.TensorboardRun(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_tensorboard_run_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.update_tensorboard_run( - tensorboard_service.UpdateTensorboardRunRequest(), - tensorboard_run=gca_tensorboard_run.TensorboardRun(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_list_tensorboard_runs(transport: str = 'grpc', request_type=tensorboard_service.ListTensorboardRunsRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_runs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ListTensorboardRunsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_tensorboard_runs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ListTensorboardRunsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTensorboardRunsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_tensorboard_runs_from_dict(): - test_list_tensorboard_runs(request_type=dict) - - -def test_list_tensorboard_runs_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_tensorboard_runs), - '__call__') as call: - client.list_tensorboard_runs() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ListTensorboardRunsRequest() - - -@pytest.mark.asyncio -async def test_list_tensorboard_runs_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ListTensorboardRunsRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_runs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardRunsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_tensorboard_runs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ListTensorboardRunsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTensorboardRunsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_tensorboard_runs_async_from_dict(): - await test_list_tensorboard_runs_async(request_type=dict) - - -def test_list_tensorboard_runs_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = tensorboard_service.ListTensorboardRunsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_runs), - '__call__') as call: - call.return_value = tensorboard_service.ListTensorboardRunsResponse() - client.list_tensorboard_runs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_tensorboard_runs_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ListTensorboardRunsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_runs), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardRunsResponse()) - await client.list_tensorboard_runs(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_tensorboard_runs_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_tensorboard_runs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ListTensorboardRunsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_tensorboard_runs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_tensorboard_runs_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_tensorboard_runs( - tensorboard_service.ListTensorboardRunsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_tensorboard_runs_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_runs), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ListTensorboardRunsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardRunsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_tensorboard_runs( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_tensorboard_runs_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_tensorboard_runs( - tensorboard_service.ListTensorboardRunsRequest(), - parent='parent_value', - ) - - -def test_list_tensorboard_runs_pager(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_runs), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[ - tensorboard_run.TensorboardRun(), - tensorboard_run.TensorboardRun(), - tensorboard_run.TensorboardRun(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[ - tensorboard_run.TensorboardRun(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[ - tensorboard_run.TensorboardRun(), - tensorboard_run.TensorboardRun(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_tensorboard_runs(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, tensorboard_run.TensorboardRun) - for i in results) - -def test_list_tensorboard_runs_pages(): - client = 
TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_runs), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[ - tensorboard_run.TensorboardRun(), - tensorboard_run.TensorboardRun(), - tensorboard_run.TensorboardRun(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[ - tensorboard_run.TensorboardRun(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[ - tensorboard_run.TensorboardRun(), - tensorboard_run.TensorboardRun(), - ], - ), - RuntimeError, - ) - pages = list(client.list_tensorboard_runs(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_tensorboard_runs_async_pager(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_runs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[ - tensorboard_run.TensorboardRun(), - tensorboard_run.TensorboardRun(), - tensorboard_run.TensorboardRun(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[ - tensorboard_run.TensorboardRun(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[ - tensorboard_run.TensorboardRun(), - tensorboard_run.TensorboardRun(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_tensorboard_runs(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, tensorboard_run.TensorboardRun) - for i in responses) - -@pytest.mark.asyncio -async def test_list_tensorboard_runs_async_pages(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_runs), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[ - tensorboard_run.TensorboardRun(), - tensorboard_run.TensorboardRun(), - tensorboard_run.TensorboardRun(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[ - tensorboard_run.TensorboardRun(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardRunsResponse( - tensorboard_runs=[ - tensorboard_run.TensorboardRun(), - tensorboard_run.TensorboardRun(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_tensorboard_runs(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_tensorboard_run(transport: str = 'grpc', request_type=tensorboard_service.DeleteTensorboardRunRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardRunRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_delete_tensorboard_run_from_dict(): - test_delete_tensorboard_run(request_type=dict) - - -def test_delete_tensorboard_run_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_run), - '__call__') as call: - client.delete_tensorboard_run() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardRunRequest() - - -@pytest.mark.asyncio -async def test_delete_tensorboard_run_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.DeleteTensorboardRunRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardRunRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_tensorboard_run_async_from_dict(): - await test_delete_tensorboard_run_async(request_type=dict) - - -def test_delete_tensorboard_run_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.DeleteTensorboardRunRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_run), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_tensorboard_run_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.DeleteTensorboardRunRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_run), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_tensorboard_run(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_tensorboard_run_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_tensorboard_run( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_tensorboard_run_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_tensorboard_run( - tensorboard_service.DeleteTensorboardRunRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_tensorboard_run_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_run), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_tensorboard_run( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_tensorboard_run_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_tensorboard_run( - tensorboard_service.DeleteTensorboardRunRequest(), - name='name_value', - ) - - -def test_batch_create_tensorboard_time_series(transport: str = 'grpc', request_type=tensorboard_service.BatchCreateTensorboardTimeSeriesRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.BatchCreateTensorboardTimeSeriesResponse( - ) - response = client.batch_create_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.BatchCreateTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_service.BatchCreateTensorboardTimeSeriesResponse) - - -def test_batch_create_tensorboard_time_series_from_dict(): - test_batch_create_tensorboard_time_series(request_type=dict) - - -def test_batch_create_tensorboard_time_series_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_time_series), - '__call__') as call: - client.batch_create_tensorboard_time_series() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.BatchCreateTensorboardTimeSeriesRequest() - - -@pytest.mark.asyncio -async def test_batch_create_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.BatchCreateTensorboardTimeSeriesRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchCreateTensorboardTimeSeriesResponse( - )) - response = await client.batch_create_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.BatchCreateTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_service.BatchCreateTensorboardTimeSeriesResponse) - - -@pytest.mark.asyncio -async def test_batch_create_tensorboard_time_series_async_from_dict(): - await test_batch_create_tensorboard_time_series_async(request_type=dict) - - -def test_batch_create_tensorboard_time_series_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.BatchCreateTensorboardTimeSeriesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_time_series), - '__call__') as call: - call.return_value = tensorboard_service.BatchCreateTensorboardTimeSeriesResponse() - client.batch_create_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_batch_create_tensorboard_time_series_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.BatchCreateTensorboardTimeSeriesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_time_series), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchCreateTensorboardTimeSeriesResponse()) - await client.batch_create_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_batch_create_tensorboard_time_series_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.BatchCreateTensorboardTimeSeriesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.batch_create_tensorboard_time_series( - parent='parent_value', - requests=[tensorboard_service.CreateTensorboardTimeSeriesRequest(parent='parent_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].requests - mock_val = [tensorboard_service.CreateTensorboardTimeSeriesRequest(parent='parent_value')] - assert arg == mock_val - - -def test_batch_create_tensorboard_time_series_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.batch_create_tensorboard_time_series( - tensorboard_service.BatchCreateTensorboardTimeSeriesRequest(), - parent='parent_value', - requests=[tensorboard_service.CreateTensorboardTimeSeriesRequest(parent='parent_value')], - ) - - -@pytest.mark.asyncio -async def test_batch_create_tensorboard_time_series_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_create_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.BatchCreateTensorboardTimeSeriesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchCreateTensorboardTimeSeriesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.batch_create_tensorboard_time_series( - parent='parent_value', - requests=[tensorboard_service.CreateTensorboardTimeSeriesRequest(parent='parent_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].requests - mock_val = [tensorboard_service.CreateTensorboardTimeSeriesRequest(parent='parent_value')] - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_batch_create_tensorboard_time_series_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.batch_create_tensorboard_time_series( - tensorboard_service.BatchCreateTensorboardTimeSeriesRequest(), - parent='parent_value', - requests=[tensorboard_service.CreateTensorboardTimeSeriesRequest(parent='parent_value')], - ) - - -def test_create_tensorboard_time_series(transport: str = 'grpc', request_type=tensorboard_service.CreateTensorboardTimeSeriesRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries( - name='name_value', - display_name='display_name_value', - description='description_value', - value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, - etag='etag_value', - plugin_name='plugin_name_value', - plugin_data=b'plugin_data_blob', - ) - response = client.create_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.value_type == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR - assert response.etag == 'etag_value' - assert response.plugin_name == 'plugin_name_value' - assert response.plugin_data == b'plugin_data_blob' - - -def test_create_tensorboard_time_series_from_dict(): - test_create_tensorboard_time_series(request_type=dict) - - -def test_create_tensorboard_time_series_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_tensorboard_time_series), - '__call__') as call: - client.create_tensorboard_time_series() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardTimeSeriesRequest() - - -@pytest.mark.asyncio -async def test_create_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.CreateTensorboardTimeSeriesRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries( - name='name_value', - display_name='display_name_value', - description='description_value', - value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, - etag='etag_value', - plugin_name='plugin_name_value', - plugin_data=b'plugin_data_blob', - )) - response = await client.create_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.CreateTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.value_type == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR - assert response.etag == 'etag_value' - assert response.plugin_name == 'plugin_name_value' - assert response.plugin_data == b'plugin_data_blob' - - -@pytest.mark.asyncio -async def test_create_tensorboard_time_series_async_from_dict(): - await test_create_tensorboard_time_series_async(request_type=dict) - - -def test_create_tensorboard_time_series_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.CreateTensorboardTimeSeriesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_time_series), - '__call__') as call: - call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() - client.create_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_tensorboard_time_series_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = tensorboard_service.CreateTensorboardTimeSeriesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_time_series), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries()) - await client.create_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_tensorboard_time_series_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_tensorboard_time_series( - parent='parent_value', - tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].tensorboard_time_series - mock_val = gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value') - assert arg == mock_val - - -def test_create_tensorboard_time_series_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_tensorboard_time_series( - tensorboard_service.CreateTensorboardTimeSeriesRequest(), - parent='parent_value', - tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_tensorboard_time_series_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_tensorboard_time_series( - parent='parent_value', - tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].tensorboard_time_series - mock_val = gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_tensorboard_time_series_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_tensorboard_time_series( - tensorboard_service.CreateTensorboardTimeSeriesRequest(), - parent='parent_value', - tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), - ) - - -def test_get_tensorboard_time_series(transport: str = 'grpc', request_type=tensorboard_service.GetTensorboardTimeSeriesRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_time_series.TensorboardTimeSeries( - name='name_value', - display_name='display_name_value', - description='description_value', - value_type=tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, - etag='etag_value', - plugin_name='plugin_name_value', - plugin_data=b'plugin_data_blob', - ) - response = client.get_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_time_series.TensorboardTimeSeries) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.value_type == tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR - assert response.etag == 'etag_value' - assert response.plugin_name == 'plugin_name_value' - assert response.plugin_data == b'plugin_data_blob' - - -def test_get_tensorboard_time_series_from_dict(): - test_get_tensorboard_time_series(request_type=dict) - - -def test_get_tensorboard_time_series_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_time_series), - '__call__') as call: - client.get_tensorboard_time_series() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardTimeSeriesRequest() - - -@pytest.mark.asyncio -async def test_get_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.GetTensorboardTimeSeriesRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_time_series.TensorboardTimeSeries( - name='name_value', - display_name='display_name_value', - description='description_value', - value_type=tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, - etag='etag_value', - plugin_name='plugin_name_value', - plugin_data=b'plugin_data_blob', - )) - response = await client.get_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.GetTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_time_series.TensorboardTimeSeries) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.value_type == tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR - assert response.etag == 'etag_value' - assert response.plugin_name == 'plugin_name_value' - assert response.plugin_data == b'plugin_data_blob' - - -@pytest.mark.asyncio -async def test_get_tensorboard_time_series_async_from_dict(): - await test_get_tensorboard_time_series_async(request_type=dict) - - -def test_get_tensorboard_time_series_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.GetTensorboardTimeSeriesRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_tensorboard_time_series), - '__call__') as call: - call.return_value = tensorboard_time_series.TensorboardTimeSeries() - client.get_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_tensorboard_time_series_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.GetTensorboardTimeSeriesRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_time_series), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_time_series.TensorboardTimeSeries()) - await client.get_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_tensorboard_time_series_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = tensorboard_time_series.TensorboardTimeSeries() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_tensorboard_time_series( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_tensorboard_time_series_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_tensorboard_time_series( - tensorboard_service.GetTensorboardTimeSeriesRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_tensorboard_time_series_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_time_series.TensorboardTimeSeries() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_time_series.TensorboardTimeSeries()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_tensorboard_time_series( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_tensorboard_time_series_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_tensorboard_time_series( - tensorboard_service.GetTensorboardTimeSeriesRequest(), - name='name_value', - ) - - -def test_update_tensorboard_time_series(transport: str = 'grpc', request_type=tensorboard_service.UpdateTensorboardTimeSeriesRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries( - name='name_value', - display_name='display_name_value', - description='description_value', - value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, - etag='etag_value', - plugin_name='plugin_name_value', - plugin_data=b'plugin_data_blob', - ) - response = client.update_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.value_type == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR - assert response.etag == 'etag_value' - assert response.plugin_name == 'plugin_name_value' - assert response.plugin_data == b'plugin_data_blob' - - -def test_update_tensorboard_time_series_from_dict(): - test_update_tensorboard_time_series(request_type=dict) - - -def test_update_tensorboard_time_series_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_time_series), - '__call__') as call: - client.update_tensorboard_time_series() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardTimeSeriesRequest() - - -@pytest.mark.asyncio -async def test_update_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.UpdateTensorboardTimeSeriesRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries( - name='name_value', - display_name='display_name_value', - description='description_value', - value_type=gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR, - etag='etag_value', - plugin_name='plugin_name_value', - plugin_data=b'plugin_data_blob', - )) - response = await client.update_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.UpdateTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_tensorboard_time_series.TensorboardTimeSeries) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.description == 'description_value' - assert response.value_type == gca_tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR - assert response.etag == 'etag_value' - assert response.plugin_name == 'plugin_name_value' - assert response.plugin_data == b'plugin_data_blob' - - -@pytest.mark.asyncio -async def test_update_tensorboard_time_series_async_from_dict(): - await test_update_tensorboard_time_series_async(request_type=dict) - - -def test_update_tensorboard_time_series_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.UpdateTensorboardTimeSeriesRequest() - - request.tensorboard_time_series.name = 'tensorboard_time_series.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_tensorboard_time_series), - '__call__') as call: - call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() - client.update_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_time_series.name=tensorboard_time_series.name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_tensorboard_time_series_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.UpdateTensorboardTimeSeriesRequest() - - request.tensorboard_time_series.name = 'tensorboard_time_series.name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_time_series), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries()) - await client.update_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_time_series.name=tensorboard_time_series.name/value', - ) in kw['metadata'] - - -def test_update_tensorboard_time_series_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_tensorboard_time_series( - tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_time_series - mock_val = gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_tensorboard_time_series_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_tensorboard_time_series( - tensorboard_service.UpdateTensorboardTimeSeriesRequest(), - tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.asyncio -async def test_update_tensorboard_time_series_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = gca_tensorboard_time_series.TensorboardTimeSeries() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_tensorboard_time_series.TensorboardTimeSeries()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_tensorboard_time_series( - tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_time_series - mock_val = gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_update_tensorboard_time_series_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_tensorboard_time_series( - tensorboard_service.UpdateTensorboardTimeSeriesRequest(), - tensorboard_time_series=gca_tensorboard_time_series.TensorboardTimeSeries(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_list_tensorboard_time_series(transport: str = 'grpc', request_type=tensorboard_service.ListTensorboardTimeSeriesRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse( - next_page_token='next_page_token_value', - ) - response = client.list_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ListTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTensorboardTimeSeriesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_tensorboard_time_series_from_dict(): - test_list_tensorboard_time_series(request_type=dict) - - -def test_list_tensorboard_time_series_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_tensorboard_time_series), - '__call__') as call: - client.list_tensorboard_time_series() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ListTensorboardTimeSeriesRequest() - - -@pytest.mark.asyncio -async def test_list_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ListTensorboardTimeSeriesRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardTimeSeriesResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ListTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListTensorboardTimeSeriesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_tensorboard_time_series_async_from_dict(): - await test_list_tensorboard_time_series_async(request_type=dict) - - -def test_list_tensorboard_time_series_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ListTensorboardTimeSeriesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_time_series), - '__call__') as call: - call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse() - client.list_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_tensorboard_time_series_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ListTensorboardTimeSeriesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_tensorboard_time_series), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardTimeSeriesResponse()) - await client.list_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_tensorboard_time_series_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_tensorboard_time_series( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_tensorboard_time_series_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_tensorboard_time_series( - tensorboard_service.ListTensorboardTimeSeriesRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_tensorboard_time_series_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ListTensorboardTimeSeriesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ListTensorboardTimeSeriesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_tensorboard_time_series( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_tensorboard_time_series_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_tensorboard_time_series( - tensorboard_service.ListTensorboardTimeSeriesRequest(), - parent='parent_value', - ) - - -def test_list_tensorboard_time_series_pager(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_tensorboard_time_series), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[ - tensorboard_time_series.TensorboardTimeSeries(), - tensorboard_time_series.TensorboardTimeSeries(), - tensorboard_time_series.TensorboardTimeSeries(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[ - tensorboard_time_series.TensorboardTimeSeries(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[ - tensorboard_time_series.TensorboardTimeSeries(), - tensorboard_time_series.TensorboardTimeSeries(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_tensorboard_time_series(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, tensorboard_time_series.TensorboardTimeSeries) - for i in results) - -def test_list_tensorboard_time_series_pages(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_time_series), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[ - tensorboard_time_series.TensorboardTimeSeries(), - tensorboard_time_series.TensorboardTimeSeries(), - tensorboard_time_series.TensorboardTimeSeries(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[ - tensorboard_time_series.TensorboardTimeSeries(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[ - tensorboard_time_series.TensorboardTimeSeries(), - tensorboard_time_series.TensorboardTimeSeries(), - ], - ), - RuntimeError, - ) - pages = list(client.list_tensorboard_time_series(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_tensorboard_time_series_async_pager(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_time_series), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[ - tensorboard_time_series.TensorboardTimeSeries(), - tensorboard_time_series.TensorboardTimeSeries(), - tensorboard_time_series.TensorboardTimeSeries(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[ - tensorboard_time_series.TensorboardTimeSeries(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[ - tensorboard_time_series.TensorboardTimeSeries(), - tensorboard_time_series.TensorboardTimeSeries(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_tensorboard_time_series(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, tensorboard_time_series.TensorboardTimeSeries) - for i in responses) - -@pytest.mark.asyncio -async def test_list_tensorboard_time_series_async_pages(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tensorboard_time_series), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[ - tensorboard_time_series.TensorboardTimeSeries(), - tensorboard_time_series.TensorboardTimeSeries(), - tensorboard_time_series.TensorboardTimeSeries(), - ], - next_page_token='abc', - ), - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[], - next_page_token='def', - ), - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[ - tensorboard_time_series.TensorboardTimeSeries(), - ], - next_page_token='ghi', - ), - tensorboard_service.ListTensorboardTimeSeriesResponse( - tensorboard_time_series=[ - tensorboard_time_series.TensorboardTimeSeries(), - tensorboard_time_series.TensorboardTimeSeries(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_tensorboard_time_series(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_tensorboard_time_series(transport: str = 'grpc', request_type=tensorboard_service.DeleteTensorboardTimeSeriesRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.delete_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_delete_tensorboard_time_series_from_dict(): - test_delete_tensorboard_time_series(request_type=dict) - - -def test_delete_tensorboard_time_series_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_time_series), - '__call__') as call: - client.delete_tensorboard_time_series() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardTimeSeriesRequest() - - -@pytest.mark.asyncio -async def test_delete_tensorboard_time_series_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.DeleteTensorboardTimeSeriesRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.delete_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.DeleteTensorboardTimeSeriesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_delete_tensorboard_time_series_async_from_dict(): - await test_delete_tensorboard_time_series_async(request_type=dict) - - -def test_delete_tensorboard_time_series_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.DeleteTensorboardTimeSeriesRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_time_series), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.delete_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_tensorboard_time_series_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.DeleteTensorboardTimeSeriesRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_tensorboard_time_series), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.delete_tensorboard_time_series(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_tensorboard_time_series_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_tensorboard_time_series( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_tensorboard_time_series_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.delete_tensorboard_time_series( - tensorboard_service.DeleteTensorboardTimeSeriesRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_tensorboard_time_series_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_tensorboard_time_series), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_tensorboard_time_series( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_tensorboard_time_series_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.delete_tensorboard_time_series( - tensorboard_service.DeleteTensorboardTimeSeriesRequest(), - name='name_value', - ) - - -def test_batch_read_tensorboard_time_series_data(transport: str = 'grpc', request_type=tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_tensorboard_time_series_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse( - ) - response = client.batch_read_tensorboard_time_series_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse) - - -def test_batch_read_tensorboard_time_series_data_from_dict(): - test_batch_read_tensorboard_time_series_data(request_type=dict) - - -def test_batch_read_tensorboard_time_series_data_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.batch_read_tensorboard_time_series_data), - '__call__') as call: - client.batch_read_tensorboard_time_series_data() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest() - - -@pytest.mark.asyncio -async def test_batch_read_tensorboard_time_series_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_tensorboard_time_series_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse( - )) - response = await client.batch_read_tensorboard_time_series_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse) - - -@pytest.mark.asyncio -async def test_batch_read_tensorboard_time_series_data_async_from_dict(): - await test_batch_read_tensorboard_time_series_data_async(request_type=dict) - - -def test_batch_read_tensorboard_time_series_data_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest() - - request.tensorboard = 'tensorboard/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_tensorboard_time_series_data), - '__call__') as call: - call.return_value = tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse() - client.batch_read_tensorboard_time_series_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard=tensorboard/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_batch_read_tensorboard_time_series_data_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest() - - request.tensorboard = 'tensorboard/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.batch_read_tensorboard_time_series_data), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse()) - await client.batch_read_tensorboard_time_series_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard=tensorboard/value', - ) in kw['metadata'] - - -def test_batch_read_tensorboard_time_series_data_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_tensorboard_time_series_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.batch_read_tensorboard_time_series_data( - tensorboard='tensorboard_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard - mock_val = 'tensorboard_value' - assert arg == mock_val - - -def test_batch_read_tensorboard_time_series_data_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.batch_read_tensorboard_time_series_data( - tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest(), - tensorboard='tensorboard_value', - ) - - -@pytest.mark.asyncio -async def test_batch_read_tensorboard_time_series_data_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.batch_read_tensorboard_time_series_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.BatchReadTensorboardTimeSeriesDataResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.batch_read_tensorboard_time_series_data( - tensorboard='tensorboard_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard - mock_val = 'tensorboard_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_batch_read_tensorboard_time_series_data_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.batch_read_tensorboard_time_series_data( - tensorboard_service.BatchReadTensorboardTimeSeriesDataRequest(), - tensorboard='tensorboard_value', - ) - - -def test_read_tensorboard_time_series_data(transport: str = 'grpc', request_type=tensorboard_service.ReadTensorboardTimeSeriesDataRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_time_series_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse( - ) - response = client.read_tensorboard_time_series_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ReadTensorboardTimeSeriesDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_service.ReadTensorboardTimeSeriesDataResponse) - - -def test_read_tensorboard_time_series_data_from_dict(): - test_read_tensorboard_time_series_data(request_type=dict) - - -def test_read_tensorboard_time_series_data_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.read_tensorboard_time_series_data), - '__call__') as call: - client.read_tensorboard_time_series_data() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ReadTensorboardTimeSeriesDataRequest() - - -@pytest.mark.asyncio -async def test_read_tensorboard_time_series_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ReadTensorboardTimeSeriesDataRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_time_series_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ReadTensorboardTimeSeriesDataResponse( - )) - response = await client.read_tensorboard_time_series_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ReadTensorboardTimeSeriesDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_service.ReadTensorboardTimeSeriesDataResponse) - - -@pytest.mark.asyncio -async def test_read_tensorboard_time_series_data_async_from_dict(): - await test_read_tensorboard_time_series_data_async(request_type=dict) - - -def test_read_tensorboard_time_series_data_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. 
Set these to a non-empty value. - request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest() - - request.tensorboard_time_series = 'tensorboard_time_series/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_time_series_data), - '__call__') as call: - call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse() - client.read_tensorboard_time_series_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_time_series=tensorboard_time_series/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_read_tensorboard_time_series_data_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ReadTensorboardTimeSeriesDataRequest() - - request.tensorboard_time_series = 'tensorboard_time_series/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_time_series_data), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ReadTensorboardTimeSeriesDataResponse()) - await client.read_tensorboard_time_series_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_time_series=tensorboard_time_series/value', - ) in kw['metadata'] - - -def test_read_tensorboard_time_series_data_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_time_series_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.read_tensorboard_time_series_data( - tensorboard_time_series='tensorboard_time_series_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_time_series - mock_val = 'tensorboard_time_series_value' - assert arg == mock_val - - -def test_read_tensorboard_time_series_data_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.read_tensorboard_time_series_data( - tensorboard_service.ReadTensorboardTimeSeriesDataRequest(), - tensorboard_time_series='tensorboard_time_series_value', - ) - - -@pytest.mark.asyncio -async def test_read_tensorboard_time_series_data_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.read_tensorboard_time_series_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ReadTensorboardTimeSeriesDataResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ReadTensorboardTimeSeriesDataResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.read_tensorboard_time_series_data( - tensorboard_time_series='tensorboard_time_series_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_time_series - mock_val = 'tensorboard_time_series_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_read_tensorboard_time_series_data_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.read_tensorboard_time_series_data( - tensorboard_service.ReadTensorboardTimeSeriesDataRequest(), - tensorboard_time_series='tensorboard_time_series_value', - ) - - -def test_read_tensorboard_blob_data(transport: str = 'grpc', request_type=tensorboard_service.ReadTensorboardBlobDataRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.read_tensorboard_blob_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([tensorboard_service.ReadTensorboardBlobDataResponse()]) - response = client.read_tensorboard_blob_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ReadTensorboardBlobDataRequest() - - # Establish that the response is the type that we expect. - for message in response: - assert isinstance(message, tensorboard_service.ReadTensorboardBlobDataResponse) - - -def test_read_tensorboard_blob_data_from_dict(): - test_read_tensorboard_blob_data(request_type=dict) - - -def test_read_tensorboard_blob_data_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_blob_data), - '__call__') as call: - client.read_tensorboard_blob_data() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ReadTensorboardBlobDataRequest() - - -@pytest.mark.asyncio -async def test_read_tensorboard_blob_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ReadTensorboardBlobDataRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.read_tensorboard_blob_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock(side_effect=[tensorboard_service.ReadTensorboardBlobDataResponse()]) - response = await client.read_tensorboard_blob_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ReadTensorboardBlobDataRequest() - - # Establish that the response is the type that we expect. - message = await response.read() - assert isinstance(message, tensorboard_service.ReadTensorboardBlobDataResponse) - - -@pytest.mark.asyncio -async def test_read_tensorboard_blob_data_async_from_dict(): - await test_read_tensorboard_blob_data_async(request_type=dict) - - -def test_read_tensorboard_blob_data_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ReadTensorboardBlobDataRequest() - - request.time_series = 'time_series/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_blob_data), - '__call__') as call: - call.return_value = iter([tensorboard_service.ReadTensorboardBlobDataResponse()]) - client.read_tensorboard_blob_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'time_series=time_series/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_read_tensorboard_blob_data_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ReadTensorboardBlobDataRequest() - - request.time_series = 'time_series/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_blob_data), - '__call__') as call: - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock(side_effect=[tensorboard_service.ReadTensorboardBlobDataResponse()]) - await client.read_tensorboard_blob_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'time_series=time_series/value', - ) in kw['metadata'] - - -def test_read_tensorboard_blob_data_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_blob_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([tensorboard_service.ReadTensorboardBlobDataResponse()]) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.read_tensorboard_blob_data( - time_series='time_series_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].time_series - mock_val = 'time_series_value' - assert arg == mock_val - - -def test_read_tensorboard_blob_data_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.read_tensorboard_blob_data( - tensorboard_service.ReadTensorboardBlobDataRequest(), - time_series='time_series_value', - ) - - -@pytest.mark.asyncio -async def test_read_tensorboard_blob_data_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_tensorboard_blob_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([tensorboard_service.ReadTensorboardBlobDataResponse()]) - - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.read_tensorboard_blob_data( - time_series='time_series_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].time_series - mock_val = 'time_series_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_read_tensorboard_blob_data_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.read_tensorboard_blob_data( - tensorboard_service.ReadTensorboardBlobDataRequest(), - time_series='time_series_value', - ) - - -def test_write_tensorboard_experiment_data(transport: str = 'grpc', request_type=tensorboard_service.WriteTensorboardExperimentDataRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_experiment_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.WriteTensorboardExperimentDataResponse( - ) - response = client.write_tensorboard_experiment_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.WriteTensorboardExperimentDataRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, tensorboard_service.WriteTensorboardExperimentDataResponse) - - -def test_write_tensorboard_experiment_data_from_dict(): - test_write_tensorboard_experiment_data(request_type=dict) - - -def test_write_tensorboard_experiment_data_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_experiment_data), - '__call__') as call: - client.write_tensorboard_experiment_data() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.WriteTensorboardExperimentDataRequest() - - -@pytest.mark.asyncio -async def test_write_tensorboard_experiment_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.WriteTensorboardExperimentDataRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_experiment_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardExperimentDataResponse( - )) - response = await client.write_tensorboard_experiment_data(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.WriteTensorboardExperimentDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_service.WriteTensorboardExperimentDataResponse) - - -@pytest.mark.asyncio -async def test_write_tensorboard_experiment_data_async_from_dict(): - await test_write_tensorboard_experiment_data_async(request_type=dict) - - -def test_write_tensorboard_experiment_data_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.WriteTensorboardExperimentDataRequest() - - request.tensorboard_experiment = 'tensorboard_experiment/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_experiment_data), - '__call__') as call: - call.return_value = tensorboard_service.WriteTensorboardExperimentDataResponse() - client.write_tensorboard_experiment_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_experiment=tensorboard_experiment/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_write_tensorboard_experiment_data_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = tensorboard_service.WriteTensorboardExperimentDataRequest() - - request.tensorboard_experiment = 'tensorboard_experiment/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_experiment_data), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardExperimentDataResponse()) - await client.write_tensorboard_experiment_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_experiment=tensorboard_experiment/value', - ) in kw['metadata'] - - -def test_write_tensorboard_experiment_data_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_experiment_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.WriteTensorboardExperimentDataResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.write_tensorboard_experiment_data( - tensorboard_experiment='tensorboard_experiment_value', - write_run_data_requests=[tensorboard_service.WriteTensorboardRunDataRequest(tensorboard_run='tensorboard_run_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_experiment - mock_val = 'tensorboard_experiment_value' - assert arg == mock_val - arg = args[0].write_run_data_requests - mock_val = [tensorboard_service.WriteTensorboardRunDataRequest(tensorboard_run='tensorboard_run_value')] - assert arg == mock_val - - -def test_write_tensorboard_experiment_data_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.write_tensorboard_experiment_data( - tensorboard_service.WriteTensorboardExperimentDataRequest(), - tensorboard_experiment='tensorboard_experiment_value', - write_run_data_requests=[tensorboard_service.WriteTensorboardRunDataRequest(tensorboard_run='tensorboard_run_value')], - ) - - -@pytest.mark.asyncio -async def test_write_tensorboard_experiment_data_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_experiment_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.WriteTensorboardExperimentDataResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardExperimentDataResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.write_tensorboard_experiment_data( - tensorboard_experiment='tensorboard_experiment_value', - write_run_data_requests=[tensorboard_service.WriteTensorboardRunDataRequest(tensorboard_run='tensorboard_run_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_experiment - mock_val = 'tensorboard_experiment_value' - assert arg == mock_val - arg = args[0].write_run_data_requests - mock_val = [tensorboard_service.WriteTensorboardRunDataRequest(tensorboard_run='tensorboard_run_value')] - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_write_tensorboard_experiment_data_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.write_tensorboard_experiment_data( - tensorboard_service.WriteTensorboardExperimentDataRequest(), - tensorboard_experiment='tensorboard_experiment_value', - write_run_data_requests=[tensorboard_service.WriteTensorboardRunDataRequest(tensorboard_run='tensorboard_run_value')], - ) - - -def test_write_tensorboard_run_data(transport: str = 'grpc', request_type=tensorboard_service.WriteTensorboardRunDataRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_run_data), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = tensorboard_service.WriteTensorboardRunDataResponse( - ) - response = client.write_tensorboard_run_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.WriteTensorboardRunDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_service.WriteTensorboardRunDataResponse) - - -def test_write_tensorboard_run_data_from_dict(): - test_write_tensorboard_run_data(request_type=dict) - - -def test_write_tensorboard_run_data_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_run_data), - '__call__') as call: - client.write_tensorboard_run_data() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.WriteTensorboardRunDataRequest() - - -@pytest.mark.asyncio -async def test_write_tensorboard_run_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.WriteTensorboardRunDataRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_run_data), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardRunDataResponse( - )) - response = await client.write_tensorboard_run_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.WriteTensorboardRunDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, tensorboard_service.WriteTensorboardRunDataResponse) - - -@pytest.mark.asyncio -async def test_write_tensorboard_run_data_async_from_dict(): - await test_write_tensorboard_run_data_async(request_type=dict) - - -def test_write_tensorboard_run_data_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.WriteTensorboardRunDataRequest() - - request.tensorboard_run = 'tensorboard_run/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_run_data), - '__call__') as call: - call.return_value = tensorboard_service.WriteTensorboardRunDataResponse() - client.write_tensorboard_run_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_run=tensorboard_run/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_write_tensorboard_run_data_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. 
Set these to a non-empty value. - request = tensorboard_service.WriteTensorboardRunDataRequest() - - request.tensorboard_run = 'tensorboard_run/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_run_data), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardRunDataResponse()) - await client.write_tensorboard_run_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_run=tensorboard_run/value', - ) in kw['metadata'] - - -def test_write_tensorboard_run_data_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_run_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.WriteTensorboardRunDataResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.write_tensorboard_run_data( - tensorboard_run='tensorboard_run_value', - time_series_data=[tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_run - mock_val = 'tensorboard_run_value' - assert arg == mock_val - arg = args[0].time_series_data - mock_val = [tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')] - assert arg == mock_val - - -def test_write_tensorboard_run_data_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.write_tensorboard_run_data( - tensorboard_service.WriteTensorboardRunDataRequest(), - tensorboard_run='tensorboard_run_value', - time_series_data=[tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')], - ) - - -@pytest.mark.asyncio -async def test_write_tensorboard_run_data_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.write_tensorboard_run_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.WriteTensorboardRunDataResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.WriteTensorboardRunDataResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.write_tensorboard_run_data( - tensorboard_run='tensorboard_run_value', - time_series_data=[tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_run - mock_val = 'tensorboard_run_value' - assert arg == mock_val - arg = args[0].time_series_data - mock_val = [tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')] - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_write_tensorboard_run_data_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.write_tensorboard_run_data( - tensorboard_service.WriteTensorboardRunDataRequest(), - tensorboard_run='tensorboard_run_value', - time_series_data=[tensorboard_data.TimeSeriesData(tensorboard_time_series_id='tensorboard_time_series_id_value')], - ) - - -def test_export_tensorboard_time_series_data(transport: str = 'grpc', request_type=tensorboard_service.ExportTensorboardTimeSeriesDataRequest): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_tensorboard_time_series_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - next_page_token='next_page_token_value', - ) - response = client.export_tensorboard_time_series_data(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ExportTensorboardTimeSeriesDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ExportTensorboardTimeSeriesDataPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_export_tensorboard_time_series_data_from_dict(): - test_export_tensorboard_time_series_data(request_type=dict) - - -def test_export_tensorboard_time_series_data_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_tensorboard_time_series_data), - '__call__') as call: - client.export_tensorboard_time_series_data() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ExportTensorboardTimeSeriesDataRequest() - - -@pytest.mark.asyncio -async def test_export_tensorboard_time_series_data_async(transport: str = 'grpc_asyncio', request_type=tensorboard_service.ExportTensorboardTimeSeriesDataRequest): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_tensorboard_time_series_data), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - next_page_token='next_page_token_value', - )) - response = await client.export_tensorboard_time_series_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == tensorboard_service.ExportTensorboardTimeSeriesDataRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ExportTensorboardTimeSeriesDataAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_export_tensorboard_time_series_data_async_from_dict(): - await test_export_tensorboard_time_series_data_async(request_type=dict) - - -def test_export_tensorboard_time_series_data_field_headers(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest() - - request.tensorboard_time_series = 'tensorboard_time_series/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_tensorboard_time_series_data), - '__call__') as call: - call.return_value = tensorboard_service.ExportTensorboardTimeSeriesDataResponse() - client.export_tensorboard_time_series_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_time_series=tensorboard_time_series/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_export_tensorboard_time_series_data_field_headers_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = tensorboard_service.ExportTensorboardTimeSeriesDataRequest() - - request.tensorboard_time_series = 'tensorboard_time_series/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_tensorboard_time_series_data), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ExportTensorboardTimeSeriesDataResponse()) - await client.export_tensorboard_time_series_data(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'tensorboard_time_series=tensorboard_time_series/value', - ) in kw['metadata'] - - -def test_export_tensorboard_time_series_data_flattened(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_tensorboard_time_series_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ExportTensorboardTimeSeriesDataResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.export_tensorboard_time_series_data( - tensorboard_time_series='tensorboard_time_series_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_time_series - mock_val = 'tensorboard_time_series_value' - assert arg == mock_val - - -def test_export_tensorboard_time_series_data_flattened_error(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.export_tensorboard_time_series_data( - tensorboard_service.ExportTensorboardTimeSeriesDataRequest(), - tensorboard_time_series='tensorboard_time_series_value', - ) - - -@pytest.mark.asyncio -async def test_export_tensorboard_time_series_data_flattened_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_tensorboard_time_series_data), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = tensorboard_service.ExportTensorboardTimeSeriesDataResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tensorboard_service.ExportTensorboardTimeSeriesDataResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.export_tensorboard_time_series_data( - tensorboard_time_series='tensorboard_time_series_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].tensorboard_time_series - mock_val = 'tensorboard_time_series_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_export_tensorboard_time_series_data_flattened_error_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.export_tensorboard_time_series_data( - tensorboard_service.ExportTensorboardTimeSeriesDataRequest(), - tensorboard_time_series='tensorboard_time_series_value', - ) - - -def test_export_tensorboard_time_series_data_pager(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_tensorboard_time_series_data), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[ - tensorboard_data.TimeSeriesDataPoint(), - tensorboard_data.TimeSeriesDataPoint(), - tensorboard_data.TimeSeriesDataPoint(), - ], - next_page_token='abc', - ), - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[], - next_page_token='def', - ), - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[ - tensorboard_data.TimeSeriesDataPoint(), - ], - next_page_token='ghi', - ), - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[ - tensorboard_data.TimeSeriesDataPoint(), - tensorboard_data.TimeSeriesDataPoint(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('tensorboard_time_series', ''), - )), - ) - pager = client.export_tensorboard_time_series_data(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, tensorboard_data.TimeSeriesDataPoint) - for i in results) - -def test_export_tensorboard_time_series_data_pages(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_tensorboard_time_series_data), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[ - tensorboard_data.TimeSeriesDataPoint(), - tensorboard_data.TimeSeriesDataPoint(), - tensorboard_data.TimeSeriesDataPoint(), - ], - next_page_token='abc', - ), - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[], - next_page_token='def', - ), - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[ - tensorboard_data.TimeSeriesDataPoint(), - ], - next_page_token='ghi', - ), - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[ - tensorboard_data.TimeSeriesDataPoint(), - tensorboard_data.TimeSeriesDataPoint(), - ], - ), - RuntimeError, - ) - pages = list(client.export_tensorboard_time_series_data(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_export_tensorboard_time_series_data_async_pager(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_tensorboard_time_series_data), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[ - tensorboard_data.TimeSeriesDataPoint(), - tensorboard_data.TimeSeriesDataPoint(), - tensorboard_data.TimeSeriesDataPoint(), - ], - next_page_token='abc', - ), - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[], - next_page_token='def', - ), - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[ - tensorboard_data.TimeSeriesDataPoint(), - ], - next_page_token='ghi', - ), - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[ - tensorboard_data.TimeSeriesDataPoint(), - tensorboard_data.TimeSeriesDataPoint(), - ], - ), - RuntimeError, - ) - async_pager = await client.export_tensorboard_time_series_data(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, tensorboard_data.TimeSeriesDataPoint) - for i in responses) - -@pytest.mark.asyncio -async def test_export_tensorboard_time_series_data_async_pages(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.export_tensorboard_time_series_data), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[ - tensorboard_data.TimeSeriesDataPoint(), - tensorboard_data.TimeSeriesDataPoint(), - tensorboard_data.TimeSeriesDataPoint(), - ], - next_page_token='abc', - ), - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[], - next_page_token='def', - ), - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[ - tensorboard_data.TimeSeriesDataPoint(), - ], - next_page_token='ghi', - ), - tensorboard_service.ExportTensorboardTimeSeriesDataResponse( - time_series_data_points=[ - tensorboard_data.TimeSeriesDataPoint(), - tensorboard_data.TimeSeriesDataPoint(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.export_tensorboard_time_series_data(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.TensorboardServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.TensorboardServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = TensorboardServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. 
- transport = transports.TensorboardServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = TensorboardServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.TensorboardServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = TensorboardServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.TensorboardServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.TensorboardServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.TensorboardServiceGrpcTransport, - transports.TensorboardServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. 
- client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.TensorboardServiceGrpcTransport, - ) - -def test_tensorboard_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.TensorboardServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_tensorboard_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1beta1.services.tensorboard_service.transports.TensorboardServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.TensorboardServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'create_tensorboard', - 'get_tensorboard', - 'update_tensorboard', - 'list_tensorboards', - 'delete_tensorboard', - 'create_tensorboard_experiment', - 'get_tensorboard_experiment', - 'update_tensorboard_experiment', - 'list_tensorboard_experiments', - 'delete_tensorboard_experiment', - 'create_tensorboard_run', - 'batch_create_tensorboard_runs', - 'get_tensorboard_run', - 'update_tensorboard_run', - 'list_tensorboard_runs', - 'delete_tensorboard_run', - 'batch_create_tensorboard_time_series', - 'create_tensorboard_time_series', - 'get_tensorboard_time_series', - 'update_tensorboard_time_series', - 'list_tensorboard_time_series', - 'delete_tensorboard_time_series', - 'batch_read_tensorboard_time_series_data', - 'read_tensorboard_time_series_data', - 'read_tensorboard_blob_data', - 'write_tensorboard_experiment_data', - 'write_tensorboard_run_data', - 'export_tensorboard_time_series_data', - ) - for method in methods: - with pytest.raises(NotImplementedError): - 
getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -def test_tensorboard_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.tensorboard_service.transports.TensorboardServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.TensorboardServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_tensorboard_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.tensorboard_service.transports.TensorboardServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.TensorboardServiceTransport() - adc.assert_called_once() - - -def test_tensorboard_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - TensorboardServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.TensorboardServiceGrpcTransport, - transports.TensorboardServiceGrpcAsyncIOTransport, - ], -) -def test_tensorboard_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.TensorboardServiceGrpcTransport, grpc_helpers), - (transports.TensorboardServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_tensorboard_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.TensorboardServiceGrpcTransport, transports.TensorboardServiceGrpcAsyncIOTransport]) -def test_tensorboard_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. 
- with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_tensorboard_service_host_no_port(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_tensorboard_service_host_with_port(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_tensorboard_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.TensorboardServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_tensorboard_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.TensorboardServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.TensorboardServiceGrpcTransport, transports.TensorboardServiceGrpcAsyncIOTransport]) -def test_tensorboard_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# 
removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.TensorboardServiceGrpcTransport, transports.TensorboardServiceGrpcAsyncIOTransport]) -def test_tensorboard_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_tensorboard_service_grpc_lro_client(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. 
- assert transport.operations_client is transport.operations_client - - -def test_tensorboard_service_grpc_lro_async_client(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_tensorboard_path(): - project = "squid" - location = "clam" - tensorboard = "whelk" - expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(project=project, location=location, tensorboard=tensorboard, ) - actual = TensorboardServiceClient.tensorboard_path(project, location, tensorboard) - assert expected == actual - - -def test_parse_tensorboard_path(): - expected = { - "project": "octopus", - "location": "oyster", - "tensorboard": "nudibranch", - } - path = TensorboardServiceClient.tensorboard_path(**expected) - - # Check that the path construction is reversible. 
- actual = TensorboardServiceClient.parse_tensorboard_path(path) - assert expected == actual - -def test_tensorboard_experiment_path(): - project = "cuttlefish" - location = "mussel" - tensorboard = "winkle" - experiment = "nautilus" - expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, ) - actual = TensorboardServiceClient.tensorboard_experiment_path(project, location, tensorboard, experiment) - assert expected == actual - - -def test_parse_tensorboard_experiment_path(): - expected = { - "project": "scallop", - "location": "abalone", - "tensorboard": "squid", - "experiment": "clam", - } - path = TensorboardServiceClient.tensorboard_experiment_path(**expected) - - # Check that the path construction is reversible. - actual = TensorboardServiceClient.parse_tensorboard_experiment_path(path) - assert expected == actual - -def test_tensorboard_run_path(): - project = "whelk" - location = "octopus" - tensorboard = "oyster" - experiment = "nudibranch" - run = "cuttlefish" - expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, run=run, ) - actual = TensorboardServiceClient.tensorboard_run_path(project, location, tensorboard, experiment, run) - assert expected == actual - - -def test_parse_tensorboard_run_path(): - expected = { - "project": "mussel", - "location": "winkle", - "tensorboard": "nautilus", - "experiment": "scallop", - "run": "abalone", - } - path = TensorboardServiceClient.tensorboard_run_path(**expected) - - # Check that the path construction is reversible. 
- actual = TensorboardServiceClient.parse_tensorboard_run_path(path) - assert expected == actual - -def test_tensorboard_time_series_path(): - project = "squid" - location = "clam" - tensorboard = "whelk" - experiment = "octopus" - run = "oyster" - time_series = "nudibranch" - expected = "projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}".format(project=project, location=location, tensorboard=tensorboard, experiment=experiment, run=run, time_series=time_series, ) - actual = TensorboardServiceClient.tensorboard_time_series_path(project, location, tensorboard, experiment, run, time_series) - assert expected == actual - - -def test_parse_tensorboard_time_series_path(): - expected = { - "project": "cuttlefish", - "location": "mussel", - "tensorboard": "winkle", - "experiment": "nautilus", - "run": "scallop", - "time_series": "abalone", - } - path = TensorboardServiceClient.tensorboard_time_series_path(**expected) - - # Check that the path construction is reversible. - actual = TensorboardServiceClient.parse_tensorboard_time_series_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "squid" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = TensorboardServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "clam", - } - path = TensorboardServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. 
- actual = TensorboardServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "whelk" - expected = "folders/{folder}".format(folder=folder, ) - actual = TensorboardServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "octopus", - } - path = TensorboardServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = TensorboardServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "oyster" - expected = "organizations/{organization}".format(organization=organization, ) - actual = TensorboardServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nudibranch", - } - path = TensorboardServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = TensorboardServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "cuttlefish" - expected = "projects/{project}".format(project=project, ) - actual = TensorboardServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "mussel", - } - path = TensorboardServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. 
- actual = TensorboardServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "winkle" - location = "nautilus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = TensorboardServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "scallop", - "location": "abalone", - } - path = TensorboardServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. - actual = TensorboardServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.TensorboardServiceTransport, '_prep_wrapped_messages') as prep: - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.TensorboardServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = TensorboardServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = TensorboardServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - 
transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = TensorboardServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. - with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py b/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py deleted file mode 100644 index 0ee9d9cb72..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/aiplatform_v1beta1/test_vizier_service.py +++ /dev/null @@ -1,4630 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import mock - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.aiplatform_v1beta1.services.vizier_service import VizierServiceAsyncClient -from google.cloud.aiplatform_v1beta1.services.vizier_service import VizierServiceClient -from google.cloud.aiplatform_v1beta1.services.vizier_service import pagers -from google.cloud.aiplatform_v1beta1.services.vizier_service import transports -from google.cloud.aiplatform_v1beta1.types import study -from google.cloud.aiplatform_v1beta1.types import study as gca_study -from google.cloud.aiplatform_v1beta1.types import vizier_service -from google.longrunning import operations_pb2 -from google.oauth2 import service_account -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import struct_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert VizierServiceClient._get_default_mtls_endpoint(None) is None - assert VizierServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert VizierServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert VizierServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert VizierServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert VizierServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - VizierServiceClient, - VizierServiceAsyncClient, -]) -def test_vizier_service_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.VizierServiceGrpcTransport, "grpc"), - (transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_vizier_service_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = 
service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - VizierServiceClient, - VizierServiceAsyncClient, -]) -def test_vizier_service_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_vizier_service_client_get_transport_class(): - transport = VizierServiceClient.get_transport_class() - available_transports = [ - transports.VizierServiceGrpcTransport, - ] - assert transport in available_transports - - transport = VizierServiceClient.get_transport_class("grpc") - assert transport == transports.VizierServiceGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc"), - (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(VizierServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceClient)) -@mock.patch.object(VizierServiceAsyncClient, "DEFAULT_ENDPOINT", 
modify_default_endpoint(VizierServiceAsyncClient)) -def test_vizier_service_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(VizierServiceClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(VizierServiceClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc", "true"), - (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc", "false"), - (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, 
"grpc_asyncio", "false"), -]) -@mock.patch.object(VizierServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceClient)) -@mock.patch.object(VizierServiceAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VizierServiceAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_vizier_service_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc"), - (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_vizier_service_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (VizierServiceClient, transports.VizierServiceGrpcTransport, "grpc"), - (VizierServiceAsyncClient, transports.VizierServiceGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_vizier_service_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_vizier_service_client_client_options_from_dict(): - with mock.patch('google.cloud.aiplatform_v1beta1.services.vizier_service.transports.VizierServiceGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = VizierServiceClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_create_study(transport: str = 'grpc', request_type=vizier_service.CreateStudyRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_study), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = gca_study.Study( - name='name_value', - display_name='display_name_value', - state=gca_study.Study.State.ACTIVE, - inactive_reason='inactive_reason_value', - ) - response = client.create_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CreateStudyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_study.Study) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == gca_study.Study.State.ACTIVE - assert response.inactive_reason == 'inactive_reason_value' - - -def test_create_study_from_dict(): - test_create_study(request_type=dict) - - -def test_create_study_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_study), - '__call__') as call: - client.create_study() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CreateStudyRequest() - - -@pytest.mark.asyncio -async def test_create_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CreateStudyRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gca_study.Study( - name='name_value', - display_name='display_name_value', - state=gca_study.Study.State.ACTIVE, - inactive_reason='inactive_reason_value', - )) - response = await client.create_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CreateStudyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gca_study.Study) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == gca_study.Study.State.ACTIVE - assert response.inactive_reason == 'inactive_reason_value' - - -@pytest.mark.asyncio -async def test_create_study_async_from_dict(): - await test_create_study_async(request_type=dict) - - -def test_create_study_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.CreateStudyRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_study), - '__call__') as call: - call.return_value = gca_study.Study() - client.create_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_study_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.CreateStudyRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_study), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_study.Study()) - await client.create_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_study_flattened(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_study.Study() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_study( - parent='parent_value', - study=gca_study.Study(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].study - mock_val = gca_study.Study(name='name_value') - assert arg == mock_val - - -def test_create_study_flattened_error(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_study( - vizier_service.CreateStudyRequest(), - parent='parent_value', - study=gca_study.Study(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_study_flattened_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gca_study.Study() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_study.Study()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_study( - parent='parent_value', - study=gca_study.Study(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].study - mock_val = gca_study.Study(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_study_flattened_error_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_study( - vizier_service.CreateStudyRequest(), - parent='parent_value', - study=gca_study.Study(name='name_value'), - ) - - -def test_get_study(transport: str = 'grpc', request_type=vizier_service.GetStudyRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Study( - name='name_value', - display_name='display_name_value', - state=study.Study.State.ACTIVE, - inactive_reason='inactive_reason_value', - ) - response = client.get_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.GetStudyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, study.Study) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == study.Study.State.ACTIVE - assert response.inactive_reason == 'inactive_reason_value' - - -def test_get_study_from_dict(): - test_get_study(request_type=dict) - - -def test_get_study_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_study), - '__call__') as call: - client.get_study() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.GetStudyRequest() - - -@pytest.mark.asyncio -async def test_get_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.GetStudyRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Study( - name='name_value', - display_name='display_name_value', - state=study.Study.State.ACTIVE, - inactive_reason='inactive_reason_value', - )) - response = await client.get_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.GetStudyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, study.Study) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == study.Study.State.ACTIVE - assert response.inactive_reason == 'inactive_reason_value' - - -@pytest.mark.asyncio -async def test_get_study_async_from_dict(): - await test_get_study_async(request_type=dict) - - -def test_get_study_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = vizier_service.GetStudyRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_study), - '__call__') as call: - call.return_value = study.Study() - client.get_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_study_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.GetStudyRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_study), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study()) - await client.get_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_study_flattened(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_study), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = study.Study() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_study( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_study_flattened_error(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_study( - vizier_service.GetStudyRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_study_flattened_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Study() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_study( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_study_flattened_error_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_study( - vizier_service.GetStudyRequest(), - name='name_value', - ) - - -def test_list_studies(transport: str = 'grpc', request_type=vizier_service.ListStudiesRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_studies), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = vizier_service.ListStudiesResponse( - next_page_token='next_page_token_value', - ) - response = client.list_studies(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.ListStudiesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListStudiesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_studies_from_dict(): - test_list_studies(request_type=dict) - - -def test_list_studies_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_studies), - '__call__') as call: - client.list_studies() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.ListStudiesRequest() - - -@pytest.mark.asyncio -async def test_list_studies_async(transport: str = 'grpc_asyncio', request_type=vizier_service.ListStudiesRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_studies), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListStudiesResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_studies(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.ListStudiesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListStudiesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_studies_async_from_dict(): - await test_list_studies_async(request_type=dict) - - -def test_list_studies_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.ListStudiesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_studies), - '__call__') as call: - call.return_value = vizier_service.ListStudiesResponse() - client.list_studies(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_studies_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.ListStudiesRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_studies), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListStudiesResponse()) - await client.list_studies(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_studies_flattened(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_studies), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = vizier_service.ListStudiesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_studies( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_studies_flattened_error(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_studies( - vizier_service.ListStudiesRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_studies_flattened_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_studies), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = vizier_service.ListStudiesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListStudiesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_studies( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_studies_flattened_error_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_studies( - vizier_service.ListStudiesRequest(), - parent='parent_value', - ) - - -def test_list_studies_pager(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_studies), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - vizier_service.ListStudiesResponse( - studies=[ - study.Study(), - study.Study(), - study.Study(), - ], - next_page_token='abc', - ), - vizier_service.ListStudiesResponse( - studies=[], - next_page_token='def', - ), - vizier_service.ListStudiesResponse( - studies=[ - study.Study(), - ], - next_page_token='ghi', - ), - vizier_service.ListStudiesResponse( - studies=[ - study.Study(), - study.Study(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_studies(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, study.Study) - for i in results) - -def test_list_studies_pages(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_studies), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - vizier_service.ListStudiesResponse( - studies=[ - study.Study(), - study.Study(), - study.Study(), - ], - next_page_token='abc', - ), - vizier_service.ListStudiesResponse( - studies=[], - next_page_token='def', - ), - vizier_service.ListStudiesResponse( - studies=[ - study.Study(), - ], - next_page_token='ghi', - ), - vizier_service.ListStudiesResponse( - studies=[ - study.Study(), - study.Study(), - ], - ), - RuntimeError, - ) - pages = list(client.list_studies(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_studies_async_pager(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_studies), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - vizier_service.ListStudiesResponse( - studies=[ - study.Study(), - study.Study(), - study.Study(), - ], - next_page_token='abc', - ), - vizier_service.ListStudiesResponse( - studies=[], - next_page_token='def', - ), - vizier_service.ListStudiesResponse( - studies=[ - study.Study(), - ], - next_page_token='ghi', - ), - vizier_service.ListStudiesResponse( - studies=[ - study.Study(), - study.Study(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_studies(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, study.Study) - for i in responses) - -@pytest.mark.asyncio -async def test_list_studies_async_pages(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_studies), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - vizier_service.ListStudiesResponse( - studies=[ - study.Study(), - study.Study(), - study.Study(), - ], - next_page_token='abc', - ), - vizier_service.ListStudiesResponse( - studies=[], - next_page_token='def', - ), - vizier_service.ListStudiesResponse( - studies=[ - study.Study(), - ], - next_page_token='ghi', - ), - vizier_service.ListStudiesResponse( - studies=[ - study.Study(), - study.Study(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_studies(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_delete_study(transport: str = 'grpc', request_type=vizier_service.DeleteStudyRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.DeleteStudyRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_study_from_dict(): - test_delete_study(request_type=dict) - - -def test_delete_study_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_study), - '__call__') as call: - client.delete_study() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.DeleteStudyRequest() - - -@pytest.mark.asyncio -async def test_delete_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.DeleteStudyRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.DeleteStudyRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_delete_study_async_from_dict(): - await test_delete_study_async(request_type=dict) - - -def test_delete_study_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = vizier_service.DeleteStudyRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_study), - '__call__') as call: - call.return_value = None - client.delete_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_study_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.DeleteStudyRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_study), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_study_flattened(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_study), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_study( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_study_flattened_error(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_study( - vizier_service.DeleteStudyRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_study_flattened_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_study( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_study_flattened_error_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.delete_study( - vizier_service.DeleteStudyRequest(), - name='name_value', - ) - - -def test_lookup_study(transport: str = 'grpc', request_type=vizier_service.LookupStudyRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.lookup_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Study( - name='name_value', - display_name='display_name_value', - state=study.Study.State.ACTIVE, - inactive_reason='inactive_reason_value', - ) - response = client.lookup_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.LookupStudyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, study.Study) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == study.Study.State.ACTIVE - assert response.inactive_reason == 'inactive_reason_value' - - -def test_lookup_study_from_dict(): - test_lookup_study(request_type=dict) - - -def test_lookup_study_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.lookup_study), - '__call__') as call: - client.lookup_study() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.LookupStudyRequest() - - -@pytest.mark.asyncio -async def test_lookup_study_async(transport: str = 'grpc_asyncio', request_type=vizier_service.LookupStudyRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.lookup_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Study( - name='name_value', - display_name='display_name_value', - state=study.Study.State.ACTIVE, - inactive_reason='inactive_reason_value', - )) - response = await client.lookup_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.LookupStudyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, study.Study) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == study.Study.State.ACTIVE - assert response.inactive_reason == 'inactive_reason_value' - - -@pytest.mark.asyncio -async def test_lookup_study_async_from_dict(): - await test_lookup_study_async(request_type=dict) - - -def test_lookup_study_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. 
Set these to a non-empty value. - request = vizier_service.LookupStudyRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.lookup_study), - '__call__') as call: - call.return_value = study.Study() - client.lookup_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_lookup_study_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.LookupStudyRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.lookup_study), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study()) - await client.lookup_study(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_lookup_study_flattened(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.lookup_study), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = study.Study() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.lookup_study( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_lookup_study_flattened_error(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.lookup_study( - vizier_service.LookupStudyRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_lookup_study_flattened_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.lookup_study), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Study() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Study()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.lookup_study( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_lookup_study_flattened_error_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.lookup_study( - vizier_service.LookupStudyRequest(), - parent='parent_value', - ) - - -def test_suggest_trials(transport: str = 'grpc', request_type=vizier_service.SuggestTrialsRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.suggest_trials), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.suggest_trials(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.SuggestTrialsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_suggest_trials_from_dict(): - test_suggest_trials(request_type=dict) - - -def test_suggest_trials_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.suggest_trials), - '__call__') as call: - client.suggest_trials() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.SuggestTrialsRequest() - - -@pytest.mark.asyncio -async def test_suggest_trials_async(transport: str = 'grpc_asyncio', request_type=vizier_service.SuggestTrialsRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.suggest_trials), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.suggest_trials(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.SuggestTrialsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_suggest_trials_async_from_dict(): - await test_suggest_trials_async(request_type=dict) - - -def test_suggest_trials_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.SuggestTrialsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.suggest_trials), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.suggest_trials(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_suggest_trials_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.SuggestTrialsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.suggest_trials), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.suggest_trials(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_trial(transport: str = 'grpc', request_type=vizier_service.CreateTrialRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Trial( - name='name_value', - id='id_value', - state=study.Trial.State.REQUESTED, - client_id='client_id_value', - infeasible_reason='infeasible_reason_value', - custom_job='custom_job_value', - ) - response = client.create_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CreateTrialRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, study.Trial) - assert response.name == 'name_value' - assert response.id == 'id_value' - assert response.state == study.Trial.State.REQUESTED - assert response.client_id == 'client_id_value' - assert response.infeasible_reason == 'infeasible_reason_value' - assert response.custom_job == 'custom_job_value' - - -def test_create_trial_from_dict(): - test_create_trial(request_type=dict) - - -def test_create_trial_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_trial), - '__call__') as call: - client.create_trial() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CreateTrialRequest() - - -@pytest.mark.asyncio -async def test_create_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CreateTrialRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Trial( - name='name_value', - id='id_value', - state=study.Trial.State.REQUESTED, - client_id='client_id_value', - infeasible_reason='infeasible_reason_value', - custom_job='custom_job_value', - )) - response = await client.create_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CreateTrialRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, study.Trial) - assert response.name == 'name_value' - assert response.id == 'id_value' - assert response.state == study.Trial.State.REQUESTED - assert response.client_id == 'client_id_value' - assert response.infeasible_reason == 'infeasible_reason_value' - assert response.custom_job == 'custom_job_value' - - -@pytest.mark.asyncio -async def test_create_trial_async_from_dict(): - await test_create_trial_async(request_type=dict) - - -def test_create_trial_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.CreateTrialRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_trial), - '__call__') as call: - call.return_value = study.Trial() - client.create_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_trial_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.CreateTrialRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_trial), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) - await client.create_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_trial_flattened(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Trial() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_trial( - parent='parent_value', - trial=study.Trial(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].trial - mock_val = study.Trial(name='name_value') - assert arg == mock_val - - -def test_create_trial_flattened_error(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_trial( - vizier_service.CreateTrialRequest(), - parent='parent_value', - trial=study.Trial(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_trial_flattened_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Trial() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_trial( - parent='parent_value', - trial=study.Trial(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].trial - mock_val = study.Trial(name='name_value') - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_create_trial_flattened_error_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_trial( - vizier_service.CreateTrialRequest(), - parent='parent_value', - trial=study.Trial(name='name_value'), - ) - - -def test_get_trial(transport: str = 'grpc', request_type=vizier_service.GetTrialRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Trial( - name='name_value', - id='id_value', - state=study.Trial.State.REQUESTED, - client_id='client_id_value', - infeasible_reason='infeasible_reason_value', - custom_job='custom_job_value', - ) - response = client.get_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.GetTrialRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, study.Trial) - assert response.name == 'name_value' - assert response.id == 'id_value' - assert response.state == study.Trial.State.REQUESTED - assert response.client_id == 'client_id_value' - assert response.infeasible_reason == 'infeasible_reason_value' - assert response.custom_job == 'custom_job_value' - - -def test_get_trial_from_dict(): - test_get_trial(request_type=dict) - - -def test_get_trial_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_trial), - '__call__') as call: - client.get_trial() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.GetTrialRequest() - - -@pytest.mark.asyncio -async def test_get_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.GetTrialRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Trial( - name='name_value', - id='id_value', - state=study.Trial.State.REQUESTED, - client_id='client_id_value', - infeasible_reason='infeasible_reason_value', - custom_job='custom_job_value', - )) - response = await client.get_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.GetTrialRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, study.Trial) - assert response.name == 'name_value' - assert response.id == 'id_value' - assert response.state == study.Trial.State.REQUESTED - assert response.client_id == 'client_id_value' - assert response.infeasible_reason == 'infeasible_reason_value' - assert response.custom_job == 'custom_job_value' - - -@pytest.mark.asyncio -async def test_get_trial_async_from_dict(): - await test_get_trial_async(request_type=dict) - - -def test_get_trial_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.GetTrialRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_trial), - '__call__') as call: - call.return_value = study.Trial() - client.get_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_trial_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.GetTrialRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_trial), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) - await client.get_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_trial_flattened(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Trial() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_trial( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_trial_flattened_error(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_trial( - vizier_service.GetTrialRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_get_trial_flattened_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Trial() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_trial( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_get_trial_flattened_error_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_trial( - vizier_service.GetTrialRequest(), - name='name_value', - ) - - -def test_list_trials(transport: str = 'grpc', request_type=vizier_service.ListTrialsRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_trials), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = vizier_service.ListTrialsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_trials(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.ListTrialsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTrialsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_trials_from_dict(): - test_list_trials(request_type=dict) - - -def test_list_trials_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_trials), - '__call__') as call: - client.list_trials() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.ListTrialsRequest() - - -@pytest.mark.asyncio -async def test_list_trials_async(transport: str = 'grpc_asyncio', request_type=vizier_service.ListTrialsRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_trials), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListTrialsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_trials(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.ListTrialsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTrialsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_trials_async_from_dict(): - await test_list_trials_async(request_type=dict) - - -def test_list_trials_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.ListTrialsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_trials), - '__call__') as call: - call.return_value = vizier_service.ListTrialsResponse() - client.list_trials(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_trials_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.ListTrialsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_trials), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListTrialsResponse()) - await client.list_trials(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_trials_flattened(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_trials), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = vizier_service.ListTrialsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_trials( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_trials_flattened_error(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_trials( - vizier_service.ListTrialsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_trials_flattened_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_trials), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = vizier_service.ListTrialsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListTrialsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_trials( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_trials_flattened_error_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_trials( - vizier_service.ListTrialsRequest(), - parent='parent_value', - ) - - -def test_list_trials_pager(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_trials), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - vizier_service.ListTrialsResponse( - trials=[ - study.Trial(), - study.Trial(), - study.Trial(), - ], - next_page_token='abc', - ), - vizier_service.ListTrialsResponse( - trials=[], - next_page_token='def', - ), - vizier_service.ListTrialsResponse( - trials=[ - study.Trial(), - ], - next_page_token='ghi', - ), - vizier_service.ListTrialsResponse( - trials=[ - study.Trial(), - study.Trial(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_trials(request={}) - - assert pager._metadata == metadata - - results = [i for i in pager] - assert len(results) == 6 - assert all(isinstance(i, study.Trial) - for i in results) - -def test_list_trials_pages(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_trials), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - vizier_service.ListTrialsResponse( - trials=[ - study.Trial(), - study.Trial(), - study.Trial(), - ], - next_page_token='abc', - ), - vizier_service.ListTrialsResponse( - trials=[], - next_page_token='def', - ), - vizier_service.ListTrialsResponse( - trials=[ - study.Trial(), - ], - next_page_token='ghi', - ), - vizier_service.ListTrialsResponse( - trials=[ - study.Trial(), - study.Trial(), - ], - ), - RuntimeError, - ) - pages = list(client.list_trials(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_trials_async_pager(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_trials), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. - call.side_effect = ( - vizier_service.ListTrialsResponse( - trials=[ - study.Trial(), - study.Trial(), - study.Trial(), - ], - next_page_token='abc', - ), - vizier_service.ListTrialsResponse( - trials=[], - next_page_token='def', - ), - vizier_service.ListTrialsResponse( - trials=[ - study.Trial(), - ], - next_page_token='ghi', - ), - vizier_service.ListTrialsResponse( - trials=[ - study.Trial(), - study.Trial(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_trials(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, study.Trial) - for i in responses) - -@pytest.mark.asyncio -async def test_list_trials_async_pages(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_trials), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - vizier_service.ListTrialsResponse( - trials=[ - study.Trial(), - study.Trial(), - study.Trial(), - ], - next_page_token='abc', - ), - vizier_service.ListTrialsResponse( - trials=[], - next_page_token='def', - ), - vizier_service.ListTrialsResponse( - trials=[ - study.Trial(), - ], - next_page_token='ghi', - ), - vizier_service.ListTrialsResponse( - trials=[ - study.Trial(), - study.Trial(), - ], - ), - RuntimeError, - ) - pages = [] - async for page_ in (await client.list_trials(request={})).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -def test_add_trial_measurement(transport: str = 'grpc', request_type=vizier_service.AddTrialMeasurementRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_trial_measurement), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Trial( - name='name_value', - id='id_value', - state=study.Trial.State.REQUESTED, - client_id='client_id_value', - infeasible_reason='infeasible_reason_value', - custom_job='custom_job_value', - ) - response = client.add_trial_measurement(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.AddTrialMeasurementRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, study.Trial) - assert response.name == 'name_value' - assert response.id == 'id_value' - assert response.state == study.Trial.State.REQUESTED - assert response.client_id == 'client_id_value' - assert response.infeasible_reason == 'infeasible_reason_value' - assert response.custom_job == 'custom_job_value' - - -def test_add_trial_measurement_from_dict(): - test_add_trial_measurement(request_type=dict) - - -def test_add_trial_measurement_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_trial_measurement), - '__call__') as call: - client.add_trial_measurement() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.AddTrialMeasurementRequest() - - -@pytest.mark.asyncio -async def test_add_trial_measurement_async(transport: str = 'grpc_asyncio', request_type=vizier_service.AddTrialMeasurementRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_trial_measurement), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Trial( - name='name_value', - id='id_value', - state=study.Trial.State.REQUESTED, - client_id='client_id_value', - infeasible_reason='infeasible_reason_value', - custom_job='custom_job_value', - )) - response = await client.add_trial_measurement(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.AddTrialMeasurementRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, study.Trial) - assert response.name == 'name_value' - assert response.id == 'id_value' - assert response.state == study.Trial.State.REQUESTED - assert response.client_id == 'client_id_value' - assert response.infeasible_reason == 'infeasible_reason_value' - assert response.custom_job == 'custom_job_value' - - -@pytest.mark.asyncio -async def test_add_trial_measurement_async_from_dict(): - await test_add_trial_measurement_async(request_type=dict) - - -def test_add_trial_measurement_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.AddTrialMeasurementRequest() - - request.trial_name = 'trial_name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_trial_measurement), - '__call__') as call: - call.return_value = study.Trial() - client.add_trial_measurement(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'trial_name=trial_name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_add_trial_measurement_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.AddTrialMeasurementRequest() - - request.trial_name = 'trial_name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.add_trial_measurement), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) - await client.add_trial_measurement(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'trial_name=trial_name/value', - ) in kw['metadata'] - - -def test_complete_trial(transport: str = 'grpc', request_type=vizier_service.CompleteTrialRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.complete_trial), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = study.Trial( - name='name_value', - id='id_value', - state=study.Trial.State.REQUESTED, - client_id='client_id_value', - infeasible_reason='infeasible_reason_value', - custom_job='custom_job_value', - ) - response = client.complete_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CompleteTrialRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, study.Trial) - assert response.name == 'name_value' - assert response.id == 'id_value' - assert response.state == study.Trial.State.REQUESTED - assert response.client_id == 'client_id_value' - assert response.infeasible_reason == 'infeasible_reason_value' - assert response.custom_job == 'custom_job_value' - - -def test_complete_trial_from_dict(): - test_complete_trial(request_type=dict) - - -def test_complete_trial_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.complete_trial), - '__call__') as call: - client.complete_trial() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CompleteTrialRequest() - - -@pytest.mark.asyncio -async def test_complete_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CompleteTrialRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.complete_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Trial( - name='name_value', - id='id_value', - state=study.Trial.State.REQUESTED, - client_id='client_id_value', - infeasible_reason='infeasible_reason_value', - custom_job='custom_job_value', - )) - response = await client.complete_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CompleteTrialRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, study.Trial) - assert response.name == 'name_value' - assert response.id == 'id_value' - assert response.state == study.Trial.State.REQUESTED - assert response.client_id == 'client_id_value' - assert response.infeasible_reason == 'infeasible_reason_value' - assert response.custom_job == 'custom_job_value' - - -@pytest.mark.asyncio -async def test_complete_trial_async_from_dict(): - await test_complete_trial_async(request_type=dict) - - -def test_complete_trial_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.CompleteTrialRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.complete_trial), - '__call__') as call: - call.return_value = study.Trial() - client.complete_trial(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_complete_trial_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.CompleteTrialRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.complete_trial), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) - await client.complete_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_trial(transport: str = 'grpc', request_type=vizier_service.DeleteTrialRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_trial(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.DeleteTrialRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_trial_from_dict(): - test_delete_trial(request_type=dict) - - -def test_delete_trial_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_trial), - '__call__') as call: - client.delete_trial() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.DeleteTrialRequest() - - -@pytest.mark.asyncio -async def test_delete_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.DeleteTrialRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.DeleteTrialRequest() - - # Establish that the response is the type that we expect. 
- assert response is None - - -@pytest.mark.asyncio -async def test_delete_trial_async_from_dict(): - await test_delete_trial_async(request_type=dict) - - -def test_delete_trial_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.DeleteTrialRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_trial), - '__call__') as call: - call.return_value = None - client.delete_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_trial_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.DeleteTrialRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_trial), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_trial_flattened(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_trial( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_trial_flattened_error(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_trial( - vizier_service.DeleteTrialRequest(), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_delete_trial_flattened_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_trial( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_trial_flattened_error_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_trial( - vizier_service.DeleteTrialRequest(), - name='name_value', - ) - - -def test_check_trial_early_stopping_state(transport: str = 'grpc', request_type=vizier_service.CheckTrialEarlyStoppingStateRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_trial_early_stopping_state), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.check_trial_early_stopping_state(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CheckTrialEarlyStoppingStateRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_check_trial_early_stopping_state_from_dict(): - test_check_trial_early_stopping_state(request_type=dict) - - -def test_check_trial_early_stopping_state_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_trial_early_stopping_state), - '__call__') as call: - client.check_trial_early_stopping_state() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CheckTrialEarlyStoppingStateRequest() - - -@pytest.mark.asyncio -async def test_check_trial_early_stopping_state_async(transport: str = 'grpc_asyncio', request_type=vizier_service.CheckTrialEarlyStoppingStateRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_trial_early_stopping_state), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.check_trial_early_stopping_state(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.CheckTrialEarlyStoppingStateRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_check_trial_early_stopping_state_async_from_dict(): - await test_check_trial_early_stopping_state_async(request_type=dict) - - -def test_check_trial_early_stopping_state_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.CheckTrialEarlyStoppingStateRequest() - - request.trial_name = 'trial_name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_trial_early_stopping_state), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.check_trial_early_stopping_state(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'trial_name=trial_name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_check_trial_early_stopping_state_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.CheckTrialEarlyStoppingStateRequest() - - request.trial_name = 'trial_name/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.check_trial_early_stopping_state), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.check_trial_early_stopping_state(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'trial_name=trial_name/value', - ) in kw['metadata'] - - -def test_stop_trial(transport: str = 'grpc', request_type=vizier_service.StopTrialRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.stop_trial), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = study.Trial( - name='name_value', - id='id_value', - state=study.Trial.State.REQUESTED, - client_id='client_id_value', - infeasible_reason='infeasible_reason_value', - custom_job='custom_job_value', - ) - response = client.stop_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.StopTrialRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, study.Trial) - assert response.name == 'name_value' - assert response.id == 'id_value' - assert response.state == study.Trial.State.REQUESTED - assert response.client_id == 'client_id_value' - assert response.infeasible_reason == 'infeasible_reason_value' - assert response.custom_job == 'custom_job_value' - - -def test_stop_trial_from_dict(): - test_stop_trial(request_type=dict) - - -def test_stop_trial_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.stop_trial), - '__call__') as call: - client.stop_trial() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.StopTrialRequest() - - -@pytest.mark.asyncio -async def test_stop_trial_async(transport: str = 'grpc_asyncio', request_type=vizier_service.StopTrialRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.stop_trial), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(study.Trial( - name='name_value', - id='id_value', - state=study.Trial.State.REQUESTED, - client_id='client_id_value', - infeasible_reason='infeasible_reason_value', - custom_job='custom_job_value', - )) - response = await client.stop_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.StopTrialRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, study.Trial) - assert response.name == 'name_value' - assert response.id == 'id_value' - assert response.state == study.Trial.State.REQUESTED - assert response.client_id == 'client_id_value' - assert response.infeasible_reason == 'infeasible_reason_value' - assert response.custom_job == 'custom_job_value' - - -@pytest.mark.asyncio -async def test_stop_trial_async_from_dict(): - await test_stop_trial_async(request_type=dict) - - -def test_stop_trial_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.StopTrialRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.stop_trial), - '__call__') as call: - call.return_value = study.Trial() - client.stop_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_stop_trial_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.StopTrialRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.stop_trial), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(study.Trial()) - await client.stop_trial(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_list_optimal_trials(transport: str = 'grpc', request_type=vizier_service.ListOptimalTrialsRequest): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_optimal_trials), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = vizier_service.ListOptimalTrialsResponse( - ) - response = client.list_optimal_trials(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.ListOptimalTrialsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, vizier_service.ListOptimalTrialsResponse) - - -def test_list_optimal_trials_from_dict(): - test_list_optimal_trials(request_type=dict) - - -def test_list_optimal_trials_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_optimal_trials), - '__call__') as call: - client.list_optimal_trials() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.ListOptimalTrialsRequest() - - -@pytest.mark.asyncio -async def test_list_optimal_trials_async(transport: str = 'grpc_asyncio', request_type=vizier_service.ListOptimalTrialsRequest): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_optimal_trials), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListOptimalTrialsResponse( - )) - response = await client.list_optimal_trials(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == vizier_service.ListOptimalTrialsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, vizier_service.ListOptimalTrialsResponse) - - -@pytest.mark.asyncio -async def test_list_optimal_trials_async_from_dict(): - await test_list_optimal_trials_async(request_type=dict) - - -def test_list_optimal_trials_field_headers(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.ListOptimalTrialsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_optimal_trials), - '__call__') as call: - call.return_value = vizier_service.ListOptimalTrialsResponse() - client.list_optimal_trials(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_optimal_trials_field_headers_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = vizier_service.ListOptimalTrialsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_optimal_trials), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListOptimalTrialsResponse()) - await client.list_optimal_trials(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_optimal_trials_flattened(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_optimal_trials), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = vizier_service.ListOptimalTrialsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_optimal_trials( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_optimal_trials_flattened_error(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_optimal_trials( - vizier_service.ListOptimalTrialsRequest(), - parent='parent_value', - ) - - -@pytest.mark.asyncio -async def test_list_optimal_trials_flattened_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_optimal_trials), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = vizier_service.ListOptimalTrialsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(vizier_service.ListOptimalTrialsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_optimal_trials( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_list_optimal_trials_flattened_error_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_optimal_trials( - vizier_service.ListOptimalTrialsRequest(), - parent='parent_value', - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.VizierServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. 
- transport = transports.VizierServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = VizierServiceClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.VizierServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = VizierServiceClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.VizierServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = VizierServiceClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.VizierServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.VizierServiceGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.VizierServiceGrpcTransport, - transports.VizierServiceGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. 
- client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.VizierServiceGrpcTransport, - ) - -def test_vizier_service_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.VizierServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_vizier_service_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.aiplatform_v1beta1.services.vizier_service.transports.VizierServiceTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.VizierServiceTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - 'create_study', - 'get_study', - 'list_studies', - 'delete_study', - 'lookup_study', - 'suggest_trials', - 'create_trial', - 'get_trial', - 'list_trials', - 'add_trial_measurement', - 'complete_trial', - 'delete_trial', - 'check_trial_early_stopping_state', - 'stop_trial', - 'list_optimal_trials', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - -def test_vizier_service_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.aiplatform_v1beta1.services.vizier_service.transports.VizierServiceTransport._prep_wrapped_messages') as Transport: - 
Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.VizierServiceTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -def test_vizier_service_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.aiplatform_v1beta1.services.vizier_service.transports.VizierServiceTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.VizierServiceTransport() - adc.assert_called_once() - - -def test_vizier_service_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - VizierServiceClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.VizierServiceGrpcTransport, - transports.VizierServiceGrpcAsyncIOTransport, - ], -) -def test_vizier_service_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.VizierServiceGrpcTransport, grpc_helpers), - (transports.VizierServiceGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_vizier_service_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "aiplatform.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="aiplatform.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.VizierServiceGrpcTransport, transports.VizierServiceGrpcAsyncIOTransport]) -def test_vizier_service_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. 
- with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_vizier_service_host_no_port(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:443' - - -def test_vizier_service_host_with_port(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='aiplatform.googleapis.com:8000'), - ) - assert client.transport._host == 'aiplatform.googleapis.com:8000' - -def test_vizier_service_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.VizierServiceGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_vizier_service_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.VizierServiceGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.VizierServiceGrpcTransport, transports.VizierServiceGrpcAsyncIOTransport]) -def test_vizier_service_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - 
credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.VizierServiceGrpcTransport, transports.VizierServiceGrpcAsyncIOTransport]) -def test_vizier_service_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_vizier_service_grpc_lro_client(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_vizier_service_grpc_lro_async_client(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_custom_job_path(): - project = "squid" - location = "clam" - custom_job = "whelk" - expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(project=project, location=location, custom_job=custom_job, ) - actual = VizierServiceClient.custom_job_path(project, location, custom_job) - assert expected == actual - - -def test_parse_custom_job_path(): - expected = { - "project": "octopus", - "location": "oyster", - "custom_job": "nudibranch", - } - path = VizierServiceClient.custom_job_path(**expected) - - # Check that the path construction is reversible. - actual = VizierServiceClient.parse_custom_job_path(path) - assert expected == actual - -def test_study_path(): - project = "cuttlefish" - location = "mussel" - study = "winkle" - expected = "projects/{project}/locations/{location}/studies/{study}".format(project=project, location=location, study=study, ) - actual = VizierServiceClient.study_path(project, location, study) - assert expected == actual - - -def test_parse_study_path(): - expected = { - "project": "nautilus", - "location": "scallop", - "study": "abalone", - } - path = VizierServiceClient.study_path(**expected) - - # Check that the path construction is reversible. 
- actual = VizierServiceClient.parse_study_path(path) - assert expected == actual - -def test_trial_path(): - project = "squid" - location = "clam" - study = "whelk" - trial = "octopus" - expected = "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(project=project, location=location, study=study, trial=trial, ) - actual = VizierServiceClient.trial_path(project, location, study, trial) - assert expected == actual - - -def test_parse_trial_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - "study": "cuttlefish", - "trial": "mussel", - } - path = VizierServiceClient.trial_path(**expected) - - # Check that the path construction is reversible. - actual = VizierServiceClient.parse_trial_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "winkle" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = VizierServiceClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "nautilus", - } - path = VizierServiceClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = VizierServiceClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "scallop" - expected = "folders/{folder}".format(folder=folder, ) - actual = VizierServiceClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "abalone", - } - path = VizierServiceClient.common_folder_path(**expected) - - # Check that the path construction is reversible. 
- actual = VizierServiceClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "squid" - expected = "organizations/{organization}".format(organization=organization, ) - actual = VizierServiceClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "clam", - } - path = VizierServiceClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = VizierServiceClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "whelk" - expected = "projects/{project}".format(project=project, ) - actual = VizierServiceClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "octopus", - } - path = VizierServiceClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = VizierServiceClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "oyster" - location = "nudibranch" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = VizierServiceClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "cuttlefish", - "location": "mussel", - } - path = VizierServiceClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = VizierServiceClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.VizierServiceTransport, '_prep_wrapped_messages') as prep: - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.VizierServiceTransport, '_prep_wrapped_messages') as prep: - transport_class = VizierServiceClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = VizierServiceAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - -def test_transport_close(): - transports = { - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'grpc', - ] - for transport in transports: - client = VizierServiceClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/definition_v1beta1/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/definition_v1beta1/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/definition_v1beta1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/instance_v1beta1/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/instance_v1beta1/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/instance_v1beta1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/params_v1beta1/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/params_v1beta1/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/params_v1beta1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/prediction_v1beta1/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/prediction_v1beta1/__init__.py deleted file mode 100644 index b54a5fcc42..0000000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/prediction_v1beta1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# diff --git a/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py b/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py index e312941382..3b92c2af95 100644 --- a/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py @@ -646,7 +646,9 @@ def test_create_endpoint_flattened(): # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.create_endpoint( - parent="parent_value", endpoint=gca_endpoint.Endpoint(name="name_value"), + parent="parent_value", + endpoint=gca_endpoint.Endpoint(name="name_value"), + endpoint_id="endpoint_id_value", ) # Establish that the underlying call was made with the expected @@ -659,6 +661,9 @@ def test_create_endpoint_flattened(): arg = args[0].endpoint mock_val = gca_endpoint.Endpoint(name="name_value") assert arg == mock_val + arg = args[0].endpoint_id + mock_val = "endpoint_id_value" + assert arg == mock_val def test_create_endpoint_flattened_error(): @@ -671,6 +676,7 @@ def test_create_endpoint_flattened_error(): endpoint_service.CreateEndpointRequest(), parent="parent_value", endpoint=gca_endpoint.Endpoint(name="name_value"), + endpoint_id="endpoint_id_value", ) @@ -691,7 +697,9 @@ async def test_create_endpoint_flattened_async(): # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
response = await client.create_endpoint( - parent="parent_value", endpoint=gca_endpoint.Endpoint(name="name_value"), + parent="parent_value", + endpoint=gca_endpoint.Endpoint(name="name_value"), + endpoint_id="endpoint_id_value", ) # Establish that the underlying call was made with the expected @@ -704,6 +712,9 @@ async def test_create_endpoint_flattened_async(): arg = args[0].endpoint mock_val = gca_endpoint.Endpoint(name="name_value") assert arg == mock_val + arg = args[0].endpoint_id + mock_val = "endpoint_id_value" + assert arg == mock_val @pytest.mark.asyncio @@ -719,6 +730,7 @@ async def test_create_endpoint_flattened_error_async(): endpoint_service.CreateEndpointRequest(), parent="parent_value", endpoint=gca_endpoint.Endpoint(name="name_value"), + endpoint_id="endpoint_id_value", ) @@ -742,6 +754,7 @@ def test_get_endpoint( description="description_value", etag="etag_value", network="network_value", + enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", ) response = client.get_endpoint(request) @@ -758,6 +771,7 @@ def test_get_endpoint( assert response.description == "description_value" assert response.etag == "etag_value" assert response.network == "network_value" + assert response.enable_private_service_connect is True assert ( response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" @@ -805,6 +819,7 @@ async def test_get_endpoint_async( description="description_value", etag="etag_value", network="network_value", + enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", ) ) @@ -822,6 +837,7 @@ async def test_get_endpoint_async( assert response.description == "description_value" assert response.etag == "etag_value" assert response.network == "network_value" + assert response.enable_private_service_connect is True assert ( response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" @@ -1334,6 
+1350,7 @@ def test_update_endpoint( description="description_value", etag="etag_value", network="network_value", + enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", ) response = client.update_endpoint(request) @@ -1350,6 +1367,7 @@ def test_update_endpoint( assert response.description == "description_value" assert response.etag == "etag_value" assert response.network == "network_value" + assert response.enable_private_service_connect is True assert ( response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" @@ -1397,6 +1415,7 @@ async def test_update_endpoint_async( description="description_value", etag="etag_value", network="network_value", + enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", ) ) @@ -1414,6 +1433,7 @@ async def test_update_endpoint_async( assert response.description == "description_value" assert response.etag == "etag_value" assert response.network == "network_value" + assert response.enable_private_service_connect is True assert ( response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" diff --git a/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py b/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py index 50ce58abca..90402f0bcd 100644 --- a/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_featurestore_service.py @@ -681,6 +681,7 @@ def test_create_featurestore_flattened(): client.create_featurestore( parent="parent_value", featurestore=gca_featurestore.Featurestore(name="name_value"), + featurestore_id="featurestore_id_value", ) # Establish that the underlying call was made with the expected @@ -693,6 +694,9 @@ def test_create_featurestore_flattened(): arg = args[0].featurestore mock_val = gca_featurestore.Featurestore(name="name_value") assert arg == mock_val + arg = args[0].featurestore_id + mock_val = 
"featurestore_id_value" + assert arg == mock_val def test_create_featurestore_flattened_error(): @@ -707,6 +711,7 @@ def test_create_featurestore_flattened_error(): featurestore_service.CreateFeaturestoreRequest(), parent="parent_value", featurestore=gca_featurestore.Featurestore(name="name_value"), + featurestore_id="featurestore_id_value", ) @@ -731,6 +736,7 @@ async def test_create_featurestore_flattened_async(): response = await client.create_featurestore( parent="parent_value", featurestore=gca_featurestore.Featurestore(name="name_value"), + featurestore_id="featurestore_id_value", ) # Establish that the underlying call was made with the expected @@ -743,6 +749,9 @@ async def test_create_featurestore_flattened_async(): arg = args[0].featurestore mock_val = gca_featurestore.Featurestore(name="name_value") assert arg == mock_val + arg = args[0].featurestore_id + mock_val = "featurestore_id_value" + assert arg == mock_val @pytest.mark.asyncio @@ -758,6 +767,7 @@ async def test_create_featurestore_flattened_error_async(): featurestore_service.CreateFeaturestoreRequest(), parent="parent_value", featurestore=gca_featurestore.Featurestore(name="name_value"), + featurestore_id="featurestore_id_value", ) @@ -2032,6 +2042,7 @@ def test_create_entity_type_flattened(): client.create_entity_type( parent="parent_value", entity_type=gca_entity_type.EntityType(name="name_value"), + entity_type_id="entity_type_id_value", ) # Establish that the underlying call was made with the expected @@ -2044,6 +2055,9 @@ def test_create_entity_type_flattened(): arg = args[0].entity_type mock_val = gca_entity_type.EntityType(name="name_value") assert arg == mock_val + arg = args[0].entity_type_id + mock_val = "entity_type_id_value" + assert arg == mock_val def test_create_entity_type_flattened_error(): @@ -2058,6 +2072,7 @@ def test_create_entity_type_flattened_error(): featurestore_service.CreateEntityTypeRequest(), parent="parent_value", 
entity_type=gca_entity_type.EntityType(name="name_value"), + entity_type_id="entity_type_id_value", ) @@ -2082,6 +2097,7 @@ async def test_create_entity_type_flattened_async(): response = await client.create_entity_type( parent="parent_value", entity_type=gca_entity_type.EntityType(name="name_value"), + entity_type_id="entity_type_id_value", ) # Establish that the underlying call was made with the expected @@ -2094,6 +2110,9 @@ async def test_create_entity_type_flattened_async(): arg = args[0].entity_type mock_val = gca_entity_type.EntityType(name="name_value") assert arg == mock_val + arg = args[0].entity_type_id + mock_val = "entity_type_id_value" + assert arg == mock_val @pytest.mark.asyncio @@ -2109,6 +2128,7 @@ async def test_create_entity_type_flattened_error_async(): featurestore_service.CreateEntityTypeRequest(), parent="parent_value", entity_type=gca_entity_type.EntityType(name="name_value"), + entity_type_id="entity_type_id_value", ) @@ -3361,7 +3381,9 @@ def test_create_feature_flattened(): # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
client.create_feature( - parent="parent_value", feature=gca_feature.Feature(name="name_value"), + parent="parent_value", + feature=gca_feature.Feature(name="name_value"), + feature_id="feature_id_value", ) # Establish that the underlying call was made with the expected @@ -3374,6 +3396,9 @@ def test_create_feature_flattened(): arg = args[0].feature mock_val = gca_feature.Feature(name="name_value") assert arg == mock_val + arg = args[0].feature_id + mock_val = "feature_id_value" + assert arg == mock_val def test_create_feature_flattened_error(): @@ -3388,6 +3413,7 @@ def test_create_feature_flattened_error(): featurestore_service.CreateFeatureRequest(), parent="parent_value", feature=gca_feature.Feature(name="name_value"), + feature_id="feature_id_value", ) @@ -3408,7 +3434,9 @@ async def test_create_feature_flattened_async(): # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.create_feature( - parent="parent_value", feature=gca_feature.Feature(name="name_value"), + parent="parent_value", + feature=gca_feature.Feature(name="name_value"), + feature_id="feature_id_value", ) # Establish that the underlying call was made with the expected @@ -3421,6 +3449,9 @@ async def test_create_feature_flattened_async(): arg = args[0].feature mock_val = gca_feature.Feature(name="name_value") assert arg == mock_val + arg = args[0].feature_id + mock_val = "feature_id_value" + assert arg == mock_val @pytest.mark.asyncio @@ -3436,6 +3467,7 @@ async def test_create_feature_flattened_error_async(): featurestore_service.CreateFeatureRequest(), parent="parent_value", feature=gca_feature.Feature(name="name_value"), + feature_id="feature_id_value", ) diff --git a/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py b/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py index ea0508a4cc..2917a37ed9 100644 --- a/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py +++ 
b/tests/unit/gapic/aiplatform_v1/test_index_endpoint_service.py @@ -778,6 +778,7 @@ def test_get_index_endpoint( description="description_value", etag="etag_value", network="network_value", + enable_private_service_connect=True, ) response = client.get_index_endpoint(request) @@ -793,6 +794,7 @@ def test_get_index_endpoint( assert response.description == "description_value" assert response.etag == "etag_value" assert response.network == "network_value" + assert response.enable_private_service_connect is True def test_get_index_endpoint_from_dict(): @@ -841,6 +843,7 @@ async def test_get_index_endpoint_async( description="description_value", etag="etag_value", network="network_value", + enable_private_service_connect=True, ) ) response = await client.get_index_endpoint(request) @@ -857,6 +860,7 @@ async def test_get_index_endpoint_async( assert response.description == "description_value" assert response.etag == "etag_value" assert response.network == "network_value" + assert response.enable_private_service_connect is True @pytest.mark.asyncio @@ -1436,6 +1440,7 @@ def test_update_index_endpoint( description="description_value", etag="etag_value", network="network_value", + enable_private_service_connect=True, ) response = client.update_index_endpoint(request) @@ -1451,6 +1456,7 @@ def test_update_index_endpoint( assert response.description == "description_value" assert response.etag == "etag_value" assert response.network == "network_value" + assert response.enable_private_service_connect is True def test_update_index_endpoint_from_dict(): @@ -1499,6 +1505,7 @@ async def test_update_index_endpoint_async( description="description_value", etag="etag_value", network="network_value", + enable_private_service_connect=True, ) ) response = await client.update_index_endpoint(request) @@ -1515,6 +1522,7 @@ async def test_update_index_endpoint_async( assert response.description == "description_value" assert response.etag == "etag_value" assert response.network == 
"network_value" + assert response.enable_private_service_connect is True @pytest.mark.asyncio @@ -2371,6 +2379,252 @@ async def test_undeploy_index_flattened_error_async(): ) +def test_mutate_deployed_index( + transport: str = "grpc", + request_type=index_endpoint_service.MutateDeployedIndexRequest, +): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_deployed_index), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.mutate_deployed_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.MutateDeployedIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_mutate_deployed_index_from_dict(): + test_mutate_deployed_index(request_type=dict) + + +def test_mutate_deployed_index_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.mutate_deployed_index), "__call__" + ) as call: + client.mutate_deployed_index() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.MutateDeployedIndexRequest() + + +@pytest.mark.asyncio +async def test_mutate_deployed_index_async( + transport: str = "grpc_asyncio", + request_type=index_endpoint_service.MutateDeployedIndexRequest, +): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_deployed_index), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.mutate_deployed_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.MutateDeployedIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_mutate_deployed_index_async_from_dict(): + await test_mutate_deployed_index_async(request_type=dict) + + +def test_mutate_deployed_index_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = index_endpoint_service.MutateDeployedIndexRequest() + + request.index_endpoint = "index_endpoint/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_deployed_index), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.mutate_deployed_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "index_endpoint=index_endpoint/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_mutate_deployed_index_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.MutateDeployedIndexRequest() + + request.index_endpoint = "index_endpoint/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_deployed_index), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.mutate_deployed_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "index_endpoint=index_endpoint/value",) in kw[ + "metadata" + ] + + +def test_mutate_deployed_index_flattened(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_deployed_index), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.mutate_deployed_index( + index_endpoint="index_endpoint_value", + deployed_index=gca_index_endpoint.DeployedIndex(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].index_endpoint + mock_val = "index_endpoint_value" + assert arg == mock_val + arg = args[0].deployed_index + mock_val = gca_index_endpoint.DeployedIndex(id="id_value") + assert arg == mock_val + + +def test_mutate_deployed_index_flattened_error(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.mutate_deployed_index( + index_endpoint_service.MutateDeployedIndexRequest(), + index_endpoint="index_endpoint_value", + deployed_index=gca_index_endpoint.DeployedIndex(id="id_value"), + ) + + +@pytest.mark.asyncio +async def test_mutate_deployed_index_flattened_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.mutate_deployed_index), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.mutate_deployed_index( + index_endpoint="index_endpoint_value", + deployed_index=gca_index_endpoint.DeployedIndex(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].index_endpoint + mock_val = "index_endpoint_value" + assert arg == mock_val + arg = args[0].deployed_index + mock_val = gca_index_endpoint.DeployedIndex(id="id_value") + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_mutate_deployed_index_flattened_error_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.mutate_deployed_index( + index_endpoint_service.MutateDeployedIndexRequest(), + index_endpoint="index_endpoint_value", + deployed_index=gca_index_endpoint.DeployedIndex(id="id_value"), + ) + + def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. 
transport = transports.IndexEndpointServiceGrpcTransport( @@ -2477,6 +2731,7 @@ def test_index_endpoint_service_base_transport(): "delete_index_endpoint", "deploy_index", "undeploy_index", + "mutate_deployed_index", ) for method in methods: with pytest.raises(NotImplementedError): diff --git a/tests/unit/gapic/aiplatform_v1/test_job_service.py b/tests/unit/gapic/aiplatform_v1/test_job_service.py index c6c94d7fb5..0a0221dc28 100644 --- a/tests/unit/gapic/aiplatform_v1/test_job_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_job_service.py @@ -61,6 +61,7 @@ from google.cloud.aiplatform_v1.types import job_state from google.cloud.aiplatform_v1.types import machine_resources from google.cloud.aiplatform_v1.types import manual_batch_tuning_parameters +from google.cloud.aiplatform_v1.types import model from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job from google.cloud.aiplatform_v1.types import ( model_deployment_monitoring_job as gca_model_deployment_monitoring_job, @@ -68,6 +69,7 @@ from google.cloud.aiplatform_v1.types import model_monitoring from google.cloud.aiplatform_v1.types import operation as gca_operation from google.cloud.aiplatform_v1.types import study +from google.cloud.aiplatform_v1.types import unmanaged_container_model from google.longrunning import operations_pb2 from google.oauth2 import service_account from google.protobuf import any_pb2 # type: ignore diff --git a/tests/unit/gapic/aiplatform_v1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1/test_migration_service.py index 3b1821e8d7..15277d3072 100644 --- a/tests/unit/gapic/aiplatform_v1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_migration_service.py @@ -1648,20 +1648,18 @@ def test_parse_dataset_path(): def test_dataset_path(): project = "squid" - location = "clam" - dataset = "whelk" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( - project=project, location=location, dataset=dataset, + dataset 
= "clam" + expected = "projects/{project}/datasets/{dataset}".format( + project=project, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "octopus", - "location": "oyster", - "dataset": "nudibranch", + "project": "whelk", + "dataset": "octopus", } path = MigrationServiceClient.dataset_path(**expected) @@ -1671,18 +1669,20 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "cuttlefish" - dataset = "mussel" - expected = "projects/{project}/datasets/{dataset}".format( - project=project, dataset=dataset, + project = "oyster" + location = "nudibranch" + dataset = "cuttlefish" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + project=project, location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "winkle", + "project": "mussel", + "location": "winkle", "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py index c069fb4b6a..7c59752cf7 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_endpoint_service.py @@ -649,7 +649,9 @@ def test_create_endpoint_flattened(): # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
client.create_endpoint( - parent="parent_value", endpoint=gca_endpoint.Endpoint(name="name_value"), + parent="parent_value", + endpoint=gca_endpoint.Endpoint(name="name_value"), + endpoint_id="endpoint_id_value", ) # Establish that the underlying call was made with the expected @@ -662,6 +664,9 @@ def test_create_endpoint_flattened(): arg = args[0].endpoint mock_val = gca_endpoint.Endpoint(name="name_value") assert arg == mock_val + arg = args[0].endpoint_id + mock_val = "endpoint_id_value" + assert arg == mock_val def test_create_endpoint_flattened_error(): @@ -674,6 +679,7 @@ def test_create_endpoint_flattened_error(): endpoint_service.CreateEndpointRequest(), parent="parent_value", endpoint=gca_endpoint.Endpoint(name="name_value"), + endpoint_id="endpoint_id_value", ) @@ -694,7 +700,9 @@ async def test_create_endpoint_flattened_async(): # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.create_endpoint( - parent="parent_value", endpoint=gca_endpoint.Endpoint(name="name_value"), + parent="parent_value", + endpoint=gca_endpoint.Endpoint(name="name_value"), + endpoint_id="endpoint_id_value", ) # Establish that the underlying call was made with the expected @@ -707,6 +715,9 @@ async def test_create_endpoint_flattened_async(): arg = args[0].endpoint mock_val = gca_endpoint.Endpoint(name="name_value") assert arg == mock_val + arg = args[0].endpoint_id + mock_val = "endpoint_id_value" + assert arg == mock_val @pytest.mark.asyncio @@ -722,6 +733,7 @@ async def test_create_endpoint_flattened_error_async(): endpoint_service.CreateEndpointRequest(), parent="parent_value", endpoint=gca_endpoint.Endpoint(name="name_value"), + endpoint_id="endpoint_id_value", ) @@ -745,6 +757,7 @@ def test_get_endpoint( description="description_value", etag="etag_value", network="network_value", + enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", ) 
response = client.get_endpoint(request) @@ -761,6 +774,7 @@ def test_get_endpoint( assert response.description == "description_value" assert response.etag == "etag_value" assert response.network == "network_value" + assert response.enable_private_service_connect is True assert ( response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" @@ -808,6 +822,7 @@ async def test_get_endpoint_async( description="description_value", etag="etag_value", network="network_value", + enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", ) ) @@ -825,6 +840,7 @@ async def test_get_endpoint_async( assert response.description == "description_value" assert response.etag == "etag_value" assert response.network == "network_value" + assert response.enable_private_service_connect is True assert ( response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" @@ -1337,6 +1353,7 @@ def test_update_endpoint( description="description_value", etag="etag_value", network="network_value", + enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", ) response = client.update_endpoint(request) @@ -1353,6 +1370,7 @@ def test_update_endpoint( assert response.description == "description_value" assert response.etag == "etag_value" assert response.network == "network_value" + assert response.enable_private_service_connect is True assert ( response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" @@ -1400,6 +1418,7 @@ async def test_update_endpoint_async( description="description_value", etag="etag_value", network="network_value", + enable_private_service_connect=True, model_deployment_monitoring_job="model_deployment_monitoring_job_value", ) ) @@ -1417,6 +1436,7 @@ async def test_update_endpoint_async( assert response.description == "description_value" assert response.etag == "etag_value" assert response.network == 
"network_value" + assert response.enable_private_service_connect is True assert ( response.model_deployment_monitoring_job == "model_deployment_monitoring_job_value" diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py index b91a29b022..392021dcb0 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_featurestore_service.py @@ -684,6 +684,7 @@ def test_create_featurestore_flattened(): client.create_featurestore( parent="parent_value", featurestore=gca_featurestore.Featurestore(name="name_value"), + featurestore_id="featurestore_id_value", ) # Establish that the underlying call was made with the expected @@ -696,6 +697,9 @@ def test_create_featurestore_flattened(): arg = args[0].featurestore mock_val = gca_featurestore.Featurestore(name="name_value") assert arg == mock_val + arg = args[0].featurestore_id + mock_val = "featurestore_id_value" + assert arg == mock_val def test_create_featurestore_flattened_error(): @@ -710,6 +714,7 @@ def test_create_featurestore_flattened_error(): featurestore_service.CreateFeaturestoreRequest(), parent="parent_value", featurestore=gca_featurestore.Featurestore(name="name_value"), + featurestore_id="featurestore_id_value", ) @@ -734,6 +739,7 @@ async def test_create_featurestore_flattened_async(): response = await client.create_featurestore( parent="parent_value", featurestore=gca_featurestore.Featurestore(name="name_value"), + featurestore_id="featurestore_id_value", ) # Establish that the underlying call was made with the expected @@ -746,6 +752,9 @@ async def test_create_featurestore_flattened_async(): arg = args[0].featurestore mock_val = gca_featurestore.Featurestore(name="name_value") assert arg == mock_val + arg = args[0].featurestore_id + mock_val = "featurestore_id_value" + assert arg == mock_val @pytest.mark.asyncio @@ -761,6 +770,7 @@ async def 
test_create_featurestore_flattened_error_async(): featurestore_service.CreateFeaturestoreRequest(), parent="parent_value", featurestore=gca_featurestore.Featurestore(name="name_value"), + featurestore_id="featurestore_id_value", ) @@ -2035,6 +2045,7 @@ def test_create_entity_type_flattened(): client.create_entity_type( parent="parent_value", entity_type=gca_entity_type.EntityType(name="name_value"), + entity_type_id="entity_type_id_value", ) # Establish that the underlying call was made with the expected @@ -2047,6 +2058,9 @@ def test_create_entity_type_flattened(): arg = args[0].entity_type mock_val = gca_entity_type.EntityType(name="name_value") assert arg == mock_val + arg = args[0].entity_type_id + mock_val = "entity_type_id_value" + assert arg == mock_val def test_create_entity_type_flattened_error(): @@ -2061,6 +2075,7 @@ def test_create_entity_type_flattened_error(): featurestore_service.CreateEntityTypeRequest(), parent="parent_value", entity_type=gca_entity_type.EntityType(name="name_value"), + entity_type_id="entity_type_id_value", ) @@ -2085,6 +2100,7 @@ async def test_create_entity_type_flattened_async(): response = await client.create_entity_type( parent="parent_value", entity_type=gca_entity_type.EntityType(name="name_value"), + entity_type_id="entity_type_id_value", ) # Establish that the underlying call was made with the expected @@ -2097,6 +2113,9 @@ async def test_create_entity_type_flattened_async(): arg = args[0].entity_type mock_val = gca_entity_type.EntityType(name="name_value") assert arg == mock_val + arg = args[0].entity_type_id + mock_val = "entity_type_id_value" + assert arg == mock_val @pytest.mark.asyncio @@ -2112,6 +2131,7 @@ async def test_create_entity_type_flattened_error_async(): featurestore_service.CreateEntityTypeRequest(), parent="parent_value", entity_type=gca_entity_type.EntityType(name="name_value"), + entity_type_id="entity_type_id_value", ) @@ -3364,7 +3384,9 @@ def test_create_feature_flattened(): # Call the method with a 
truthy value for each flattened field, # using the keyword arguments to the method. client.create_feature( - parent="parent_value", feature=gca_feature.Feature(name="name_value"), + parent="parent_value", + feature=gca_feature.Feature(name="name_value"), + feature_id="feature_id_value", ) # Establish that the underlying call was made with the expected @@ -3377,6 +3399,9 @@ def test_create_feature_flattened(): arg = args[0].feature mock_val = gca_feature.Feature(name="name_value") assert arg == mock_val + arg = args[0].feature_id + mock_val = "feature_id_value" + assert arg == mock_val def test_create_feature_flattened_error(): @@ -3391,6 +3416,7 @@ def test_create_feature_flattened_error(): featurestore_service.CreateFeatureRequest(), parent="parent_value", feature=gca_feature.Feature(name="name_value"), + feature_id="feature_id_value", ) @@ -3411,7 +3437,9 @@ async def test_create_feature_flattened_async(): # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
response = await client.create_feature( - parent="parent_value", feature=gca_feature.Feature(name="name_value"), + parent="parent_value", + feature=gca_feature.Feature(name="name_value"), + feature_id="feature_id_value", ) # Establish that the underlying call was made with the expected @@ -3424,6 +3452,9 @@ async def test_create_feature_flattened_async(): arg = args[0].feature mock_val = gca_feature.Feature(name="name_value") assert arg == mock_val + arg = args[0].feature_id + mock_val = "feature_id_value" + assert arg == mock_val @pytest.mark.asyncio @@ -3439,6 +3470,7 @@ async def test_create_feature_flattened_error_async(): featurestore_service.CreateFeatureRequest(), parent="parent_value", feature=gca_feature.Feature(name="name_value"), + feature_id="feature_id_value", ) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py index 052d411dff..9e96140fe4 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_index_endpoint_service.py @@ -778,6 +778,7 @@ def test_get_index_endpoint( description="description_value", etag="etag_value", network="network_value", + enable_private_service_connect=True, ) response = client.get_index_endpoint(request) @@ -793,6 +794,7 @@ def test_get_index_endpoint( assert response.description == "description_value" assert response.etag == "etag_value" assert response.network == "network_value" + assert response.enable_private_service_connect is True def test_get_index_endpoint_from_dict(): @@ -841,6 +843,7 @@ async def test_get_index_endpoint_async( description="description_value", etag="etag_value", network="network_value", + enable_private_service_connect=True, ) ) response = await client.get_index_endpoint(request) @@ -857,6 +860,7 @@ async def test_get_index_endpoint_async( assert response.description == "description_value" assert response.etag == "etag_value" assert 
response.network == "network_value" + assert response.enable_private_service_connect is True @pytest.mark.asyncio @@ -1436,6 +1440,7 @@ def test_update_index_endpoint( description="description_value", etag="etag_value", network="network_value", + enable_private_service_connect=True, ) response = client.update_index_endpoint(request) @@ -1451,6 +1456,7 @@ def test_update_index_endpoint( assert response.description == "description_value" assert response.etag == "etag_value" assert response.network == "network_value" + assert response.enable_private_service_connect is True def test_update_index_endpoint_from_dict(): @@ -1499,6 +1505,7 @@ async def test_update_index_endpoint_async( description="description_value", etag="etag_value", network="network_value", + enable_private_service_connect=True, ) ) response = await client.update_index_endpoint(request) @@ -1515,6 +1522,7 @@ async def test_update_index_endpoint_async( assert response.description == "description_value" assert response.etag == "etag_value" assert response.network == "network_value" + assert response.enable_private_service_connect is True @pytest.mark.asyncio @@ -2371,6 +2379,252 @@ async def test_undeploy_index_flattened_error_async(): ) +def test_mutate_deployed_index( + transport: str = "grpc", + request_type=index_endpoint_service.MutateDeployedIndexRequest, +): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_deployed_index), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.mutate_deployed_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.MutateDeployedIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_mutate_deployed_index_from_dict(): + test_mutate_deployed_index(request_type=dict) + + +def test_mutate_deployed_index_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_deployed_index), "__call__" + ) as call: + client.mutate_deployed_index() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.MutateDeployedIndexRequest() + + +@pytest.mark.asyncio +async def test_mutate_deployed_index_async( + transport: str = "grpc_asyncio", + request_type=index_endpoint_service.MutateDeployedIndexRequest, +): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_deployed_index), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.mutate_deployed_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == index_endpoint_service.MutateDeployedIndexRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_mutate_deployed_index_async_from_dict(): + await test_mutate_deployed_index_async(request_type=dict) + + +def test_mutate_deployed_index_field_headers(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = index_endpoint_service.MutateDeployedIndexRequest() + + request.index_endpoint = "index_endpoint/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_deployed_index), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.mutate_deployed_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "index_endpoint=index_endpoint/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_mutate_deployed_index_field_headers_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = index_endpoint_service.MutateDeployedIndexRequest() + + request.index_endpoint = "index_endpoint/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_deployed_index), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.mutate_deployed_index(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "index_endpoint=index_endpoint/value",) in kw[ + "metadata" + ] + + +def test_mutate_deployed_index_flattened(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_deployed_index), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.mutate_deployed_index( + index_endpoint="index_endpoint_value", + deployed_index=gca_index_endpoint.DeployedIndex(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].index_endpoint + mock_val = "index_endpoint_value" + assert arg == mock_val + arg = args[0].deployed_index + mock_val = gca_index_endpoint.DeployedIndex(id="id_value") + assert arg == mock_val + + +def test_mutate_deployed_index_flattened_error(): + client = IndexEndpointServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.mutate_deployed_index( + index_endpoint_service.MutateDeployedIndexRequest(), + index_endpoint="index_endpoint_value", + deployed_index=gca_index_endpoint.DeployedIndex(id="id_value"), + ) + + +@pytest.mark.asyncio +async def test_mutate_deployed_index_flattened_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_deployed_index), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.mutate_deployed_index( + index_endpoint="index_endpoint_value", + deployed_index=gca_index_endpoint.DeployedIndex(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].index_endpoint + mock_val = "index_endpoint_value" + assert arg == mock_val + arg = args[0].deployed_index + mock_val = gca_index_endpoint.DeployedIndex(id="id_value") + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_mutate_deployed_index_flattened_error_async(): + client = IndexEndpointServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.mutate_deployed_index( + index_endpoint_service.MutateDeployedIndexRequest(), + index_endpoint="index_endpoint_value", + deployed_index=gca_index_endpoint.DeployedIndex(id="id_value"), + ) + + def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. transport = transports.IndexEndpointServiceGrpcTransport( @@ -2477,6 +2731,7 @@ def test_index_endpoint_service_base_transport(): "delete_index_endpoint", "deploy_index", "undeploy_index", + "mutate_deployed_index", ) for method in methods: with pytest.raises(NotImplementedError): diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py index 9ed6e4f841..4538fb5fbe 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_job_service.py @@ -51,6 +51,7 @@ data_labeling_job as gca_data_labeling_job, ) from google.cloud.aiplatform_v1beta1.types import encryption_spec +from google.cloud.aiplatform_v1beta1.types import env_var from google.cloud.aiplatform_v1beta1.types import explanation from google.cloud.aiplatform_v1beta1.types import explanation_metadata from google.cloud.aiplatform_v1beta1.types import hyperparameter_tuning_job @@ -62,6 +63,7 @@ from google.cloud.aiplatform_v1beta1.types import job_state from google.cloud.aiplatform_v1beta1.types 
import machine_resources from google.cloud.aiplatform_v1beta1.types import manual_batch_tuning_parameters +from google.cloud.aiplatform_v1beta1.types import model from google.cloud.aiplatform_v1beta1.types import model_deployment_monitoring_job from google.cloud.aiplatform_v1beta1.types import ( model_deployment_monitoring_job as gca_model_deployment_monitoring_job, @@ -69,6 +71,7 @@ from google.cloud.aiplatform_v1beta1.types import model_monitoring from google.cloud.aiplatform_v1beta1.types import operation as gca_operation from google.cloud.aiplatform_v1beta1.types import study +from google.cloud.aiplatform_v1beta1.types import unmanaged_container_model from google.longrunning import operations_pb2 from google.oauth2 import service_account from google.protobuf import any_pb2 # type: ignore